VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 66906

Last change on this file since 66906 was 66906, checked in by vboxsync, 8 years ago

IEM: Implemented vmovsd Vsd,Hsd,Usd (VEX.F2.0F 10 mod=3), vmovsd Vsd,Mq (VEX.F2.0F 10 mod!=3).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 623.3 KB
1/* $Id: IEMAll.cpp 66906 2017-05-16 09:58:00Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance, an innocent-looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the "IEM" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
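 * As a rough, illustrative example only (the exact log-string syntax is defined
 * by VBox/log.h and the logging facility, not here), decoding mnemonics could
 * typically be enabled with something along the lines of VBOX_LOG=iem.e.l4.
 *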
74 */
75
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/hm_svm.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#ifdef IEM_VERIFICATION_MODE_FULL
118# include <VBox/vmm/rem.h>
119# include <VBox/vmm/mm.h>
120#endif
121#include <VBox/vmm/vm.h>
122#include <VBox/log.h>
123#include <VBox/err.h>
124#include <VBox/param.h>
125#include <VBox/dis.h>
126#include <VBox/disopcode.h>
127#include <iprt/assert.h>
128#include <iprt/string.h>
129#include <iprt/x86.h>
130
131
132/*********************************************************************************************************************************
133* Structures and Typedefs *
134*********************************************************************************************************************************/
135/** @typedef PFNIEMOP
136 * Pointer to an opcode decoder function.
137 */
138
139/** @def FNIEMOP_DEF
140 * Define an opcode decoder function.
141 *
142 * We're using macros for this so that adding and removing parameters, as well as
143 * tweaking compiler-specific attributes, becomes easier. See FNIEMOP_CALL.
144 *
145 * @param a_Name The function name.
146 */
147
148/** @typedef PFNIEMOPRM
149 * Pointer to an opcode decoder function with RM byte.
150 */
151
152/** @def FNIEMOPRM_DEF
153 * Define an opcode decoder function with RM byte.
154 *
155 * We're using macros for this so that adding and removing parameters, as well as
156 * tweaking compiler-specific attributes, becomes easier. See FNIEMOP_CALL_1.
157 *
158 * @param a_Name The function name.
159 */
160
161#if defined(__GNUC__) && defined(RT_ARCH_X86)
162typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
170
171#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
172typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
174# define FNIEMOP_DEF(a_Name) \
175 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
176# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
177 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
178# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
179 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
180
181#elif defined(__GNUC__)
182typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
183typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
184# define FNIEMOP_DEF(a_Name) \
185 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
186# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
187 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
188# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
189 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
190
191#else
192typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
193typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
194# define FNIEMOP_DEF(a_Name) \
195 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
196# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
197 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
198# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
199 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
200
201#endif
202#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
203
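/*
 * Illustrative sketch of how the FNIEMOP_DEF/FNIEMOP_CALL macros fit together.
 * The decoder name iemOp_ExampleUd and the local bOpcodeByte are hypothetical
 * and not part of any opcode map; real decoders live in the instruction
 * template files.
 *
 *      FNIEMOP_DEF(iemOp_ExampleUd)
 *      {
 *          return iemRaiseUndefinedOpcode(pVCpu);
 *      }
 *
 *      // ... dispatching an already fetched opcode byte through a map:
 *      VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[bOpcodeByte]);
 */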
204
205/**
206 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
207 */
208typedef union IEMSELDESC
209{
210 /** The legacy view. */
211 X86DESC Legacy;
212 /** The long mode view. */
213 X86DESC64 Long;
214} IEMSELDESC;
215/** Pointer to a selector descriptor table entry. */
216typedef IEMSELDESC *PIEMSELDESC;
217
218/**
219 * CPU exception classes.
220 */
221typedef enum IEMXCPTCLASS
222{
223 IEMXCPTCLASS_BENIGN,
224 IEMXCPTCLASS_CONTRIBUTORY,
225 IEMXCPTCLASS_PAGE_FAULT
226} IEMXCPTCLASS;
227
228
229/*********************************************************************************************************************************
230* Defined Constants And Macros *
231*********************************************************************************************************************************/
232/** @def IEM_WITH_SETJMP
233 * Enables alternative status code handling using setjmps.
234 *
235 * This adds a bit of expense via the setjmp() call since it saves all the
236 * non-volatile registers. However, it eliminates return code checks and allows
237 * for more optimal return value passing (return regs instead of stack buffer).
238 */
239#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
240# define IEM_WITH_SETJMP
241#endif
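/*
 * Rough sketch of the two status handling styles this switch selects between
 * (fetcher names as used further down in this file; treat as illustrative):
 *
 *      uint8_t b;                                         // return-code style
 *      VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8(pVCpu, &b);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 *      uint8_t b = iemOpcodeGetNextU8Jmp(pVCpu);          // setjmp/longjmp style
 */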
242
243/** Temporary hack to disable the double execution. Will be removed in favor
244 * of a dedicated execution mode in EM. */
245//#define IEM_VERIFICATION_MODE_NO_REM
246
247/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
248 * due to GCC lacking knowledge about the value range of a switch. */
249#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
250
251/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
252#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
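/*
 * Typical use, sketched: an exhaustive switch over an enum where the default
 * label exists only to keep the compiler quiet:
 *
 *      switch (pVCpu->iem.s.enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: // ...
 *          case IEMMODE_32BIT: // ...
 *          case IEMMODE_64BIT: // ...
 *              break;
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 */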
253
254/**
255 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
256 * occasion.
257 */
258#ifdef LOG_ENABLED
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 do { \
261 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
262 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
263 } while (0)
264#else
265# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
266 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
267#endif
268
269/**
270 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
271 * occasion using the supplied logger statement.
272 *
273 * @param a_LoggerArgs What to log on failure.
274 */
275#ifdef LOG_ENABLED
276# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
277 do { \
278 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
279 /*LogFunc(a_LoggerArgs);*/ \
280 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
281 } while (0)
282#else
283# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
284 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
285#endif
286
287/**
288 * Call an opcode decoder function.
289 *
290 * We're using macros for this so that adding and removing parameters can be
291 * done as we please. See FNIEMOP_DEF.
292 */
293#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
294
295/**
296 * Call a common opcode decoder function taking one extra argument.
297 *
298 * We're using macros for this so that adding and removing parameters can be
299 * done as we please. See FNIEMOP_DEF_1.
300 */
301#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
302
303/**
304 * Call a common opcode decoder function taking two extra arguments.
305 *
306 * We're using macros for this so that adding and removing parameters can be
307 * done as we please. See FNIEMOP_DEF_2.
308 */
309#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
310
311/**
312 * Check if we're currently executing in real or virtual 8086 mode.
313 *
314 * @returns @c true if it is, @c false if not.
315 * @param a_pVCpu The IEM state of the current CPU.
316 */
317#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
318
319/**
320 * Check if we're currently executing in virtual 8086 mode.
321 *
322 * @returns @c true if it is, @c false if not.
323 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
324 */
325#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
326
327/**
328 * Check if we're currently executing in long mode.
329 *
330 * @returns @c true if it is, @c false if not.
331 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
332 */
333#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
334
335/**
336 * Check if we're currently executing in real mode.
337 *
338 * @returns @c true if it is, @c false if not.
339 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
340 */
341#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
342
343/**
344 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
345 * @returns PCCPUMFEATURES
346 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
347 */
348#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
349
350/**
351 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
352 * @returns PCCPUMFEATURES
353 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
354 */
355#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
356
357/**
358 * Evaluates to true if we're presenting an Intel CPU to the guest.
359 */
360#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
361
362/**
363 * Evaluates to true if we're presenting an AMD CPU to the guest.
364 */
365#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
366
367/**
368 * Check if the address is canonical.
369 */
370#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
371
372/** @def IEM_USE_UNALIGNED_DATA_ACCESS
373 * Use unaligned accesses instead of elaborate byte assembly. */
374#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
375# define IEM_USE_UNALIGNED_DATA_ACCESS
376#endif
377
378#ifdef VBOX_WITH_NESTED_HWVIRT
379/**
380 * Check the common SVM instruction preconditions.
381 */
382# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
383 do { \
384 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
385 { \
386 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
387 return iemRaiseUndefinedOpcode(pVCpu); \
388 } \
389 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
390 { \
391 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
392 return iemRaiseUndefinedOpcode(pVCpu); \
393 } \
394 if (pVCpu->iem.s.uCpl != 0) \
395 { \
396 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
397 return iemRaiseGeneralProtectionFault0(pVCpu); \
398 } \
399 } while (0)
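/*
 * Sketch of the intended use at the top of an SVM instruction implementation;
 * the mnemonic argument is only used by the log statements above (vmload here
 * is just an example):
 *
 *      IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
 */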
400
401/**
402 * Check if SVM is enabled.
403 */
404# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
405
406/**
407 * Check if an SVM control/instruction intercept is set.
408 */
409# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
410
411/**
412 * Check if an SVM read CRx intercept is set.
413 */
414# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
415
416/**
417 * Check if an SVM write CRx intercept is set.
418 */
419# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
420
421/**
422 * Check if an SVM read DRx intercept is set.
423 */
424# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
425
426/**
427 * Check if an SVM write DRx intercept is set.
428 */
429# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
430
431/**
432 * Check if an SVM exception intercept is set.
433 */
434# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uVector)))
435
436/**
437 * Invokes the SVM \#VMEXIT handler for the nested-guest.
438 */
439# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
440 do \
441 { \
442 VBOXSTRICTRC rcStrictVmExit = HMSvmNstGstVmExit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), \
443 (a_uExitInfo2)); \
444 return rcStrictVmExit == VINF_SVM_VMEXIT ? VINF_SUCCESS : rcStrictVmExit; \
445 } while (0)
446
447/**
448 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
449 * corresponding decode assist information.
450 */
451# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
452 do \
453 { \
454 uint64_t uExitInfo1; \
455 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssist \
456 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
457 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
458 else \
459 uExitInfo1 = 0; \
460 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
461 } while (0)
462
463/**
464 * Checks and handles an SVM MSR intercept.
465 */
466# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) \
467 HMSvmNstGstHandleMsrIntercept((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_idMsr), (a_fWrite))
468
469#else
470# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
471# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
472# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
473# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
474# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
475# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
476# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
477# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
478# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
479# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
480# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) (VERR_SVM_IPE_1)
481
482#endif /* VBOX_WITH_NESTED_HWVIRT */
483
484
485/*********************************************************************************************************************************
486* Global Variables *
487*********************************************************************************************************************************/
488extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
489
490
491/** Function table for the ADD instruction. */
492IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
493{
494 iemAImpl_add_u8, iemAImpl_add_u8_locked,
495 iemAImpl_add_u16, iemAImpl_add_u16_locked,
496 iemAImpl_add_u32, iemAImpl_add_u32_locked,
497 iemAImpl_add_u64, iemAImpl_add_u64_locked
498};
499
500/** Function table for the ADC instruction. */
501IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
502{
503 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
504 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
505 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
506 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
507};
508
509/** Function table for the SUB instruction. */
510IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
511{
512 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
513 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
514 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
515 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
516};
517
518/** Function table for the SBB instruction. */
519IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
520{
521 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
522 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
523 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
524 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
525};
526
527/** Function table for the OR instruction. */
528IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
529{
530 iemAImpl_or_u8, iemAImpl_or_u8_locked,
531 iemAImpl_or_u16, iemAImpl_or_u16_locked,
532 iemAImpl_or_u32, iemAImpl_or_u32_locked,
533 iemAImpl_or_u64, iemAImpl_or_u64_locked
534};
535
536/** Function table for the XOR instruction. */
537IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
538{
539 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
540 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
541 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
542 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
543};
544
545/** Function table for the AND instruction. */
546IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
547{
548 iemAImpl_and_u8, iemAImpl_and_u8_locked,
549 iemAImpl_and_u16, iemAImpl_and_u16_locked,
550 iemAImpl_and_u32, iemAImpl_and_u32_locked,
551 iemAImpl_and_u64, iemAImpl_and_u64_locked
552};
553
554/** Function table for the CMP instruction.
555 * @remarks Making operand order ASSUMPTIONS.
556 */
557IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
558{
559 iemAImpl_cmp_u8, NULL,
560 iemAImpl_cmp_u16, NULL,
561 iemAImpl_cmp_u32, NULL,
562 iemAImpl_cmp_u64, NULL
563};
564
565/** Function table for the TEST instruction.
566 * @remarks Making operand order ASSUMPTIONS.
567 */
568IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
569{
570 iemAImpl_test_u8, NULL,
571 iemAImpl_test_u16, NULL,
572 iemAImpl_test_u32, NULL,
573 iemAImpl_test_u64, NULL
574};
575
576/** Function table for the BT instruction. */
577IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
578{
579 NULL, NULL,
580 iemAImpl_bt_u16, NULL,
581 iemAImpl_bt_u32, NULL,
582 iemAImpl_bt_u64, NULL
583};
584
585/** Function table for the BTC instruction. */
586IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
587{
588 NULL, NULL,
589 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
590 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
591 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
592};
593
594/** Function table for the BTR instruction. */
595IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
596{
597 NULL, NULL,
598 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
599 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
600 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
601};
602
603/** Function table for the BTS instruction. */
604IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
605{
606 NULL, NULL,
607 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
608 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
609 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
610};
611
612/** Function table for the BSF instruction. */
613IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
614{
615 NULL, NULL,
616 iemAImpl_bsf_u16, NULL,
617 iemAImpl_bsf_u32, NULL,
618 iemAImpl_bsf_u64, NULL
619};
620
621/** Function table for the BSR instruction. */
622IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
623{
624 NULL, NULL,
625 iemAImpl_bsr_u16, NULL,
626 iemAImpl_bsr_u32, NULL,
627 iemAImpl_bsr_u64, NULL
628};
629
630/** Function table for the IMUL instruction. */
631IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
632{
633 NULL, NULL,
634 iemAImpl_imul_two_u16, NULL,
635 iemAImpl_imul_two_u32, NULL,
636 iemAImpl_imul_two_u64, NULL
637};
638
639/** Group 1 /r lookup table. */
640IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
641{
642 &g_iemAImpl_add,
643 &g_iemAImpl_or,
644 &g_iemAImpl_adc,
645 &g_iemAImpl_sbb,
646 &g_iemAImpl_and,
647 &g_iemAImpl_sub,
648 &g_iemAImpl_xor,
649 &g_iemAImpl_cmp
650};
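/*
 * Illustrative use of the group 1 table: the ModR/M reg field picks the
 * implementation, so /0 is ADD, /1 is OR, ..., /7 is CMP in the order above:
 *
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 */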
651
652/** Function table for the INC instruction. */
653IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
654{
655 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
656 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
657 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
658 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
659};
660
661/** Function table for the DEC instruction. */
662IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
663{
664 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
665 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
666 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
667 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
668};
669
670/** Function table for the NEG instruction. */
671IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
672{
673 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
674 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
675 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
676 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
677};
678
679/** Function table for the NOT instruction. */
680IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
681{
682 iemAImpl_not_u8, iemAImpl_not_u8_locked,
683 iemAImpl_not_u16, iemAImpl_not_u16_locked,
684 iemAImpl_not_u32, iemAImpl_not_u32_locked,
685 iemAImpl_not_u64, iemAImpl_not_u64_locked
686};
687
688
689/** Function table for the ROL instruction. */
690IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
691{
692 iemAImpl_rol_u8,
693 iemAImpl_rol_u16,
694 iemAImpl_rol_u32,
695 iemAImpl_rol_u64
696};
697
698/** Function table for the ROR instruction. */
699IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
700{
701 iemAImpl_ror_u8,
702 iemAImpl_ror_u16,
703 iemAImpl_ror_u32,
704 iemAImpl_ror_u64
705};
706
707/** Function table for the RCL instruction. */
708IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
709{
710 iemAImpl_rcl_u8,
711 iemAImpl_rcl_u16,
712 iemAImpl_rcl_u32,
713 iemAImpl_rcl_u64
714};
715
716/** Function table for the RCR instruction. */
717IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
718{
719 iemAImpl_rcr_u8,
720 iemAImpl_rcr_u16,
721 iemAImpl_rcr_u32,
722 iemAImpl_rcr_u64
723};
724
725/** Function table for the SHL instruction. */
726IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
727{
728 iemAImpl_shl_u8,
729 iemAImpl_shl_u16,
730 iemAImpl_shl_u32,
731 iemAImpl_shl_u64
732};
733
734/** Function table for the SHR instruction. */
735IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
736{
737 iemAImpl_shr_u8,
738 iemAImpl_shr_u16,
739 iemAImpl_shr_u32,
740 iemAImpl_shr_u64
741};
742
743/** Function table for the SAR instruction. */
744IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
745{
746 iemAImpl_sar_u8,
747 iemAImpl_sar_u16,
748 iemAImpl_sar_u32,
749 iemAImpl_sar_u64
750};
751
752
753/** Function table for the MUL instruction. */
754IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
755{
756 iemAImpl_mul_u8,
757 iemAImpl_mul_u16,
758 iemAImpl_mul_u32,
759 iemAImpl_mul_u64
760};
761
762/** Function table for the IMUL instruction working implicitly on rAX. */
763IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
764{
765 iemAImpl_imul_u8,
766 iemAImpl_imul_u16,
767 iemAImpl_imul_u32,
768 iemAImpl_imul_u64
769};
770
771/** Function table for the DIV instruction. */
772IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
773{
774 iemAImpl_div_u8,
775 iemAImpl_div_u16,
776 iemAImpl_div_u32,
777 iemAImpl_div_u64
778};
779
780/** Function table for the IDIV instruction. */
781IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
782{
783 iemAImpl_idiv_u8,
784 iemAImpl_idiv_u16,
785 iemAImpl_idiv_u32,
786 iemAImpl_idiv_u64
787};
788
789/** Function table for the SHLD instruction */
790IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
791{
792 iemAImpl_shld_u16,
793 iemAImpl_shld_u32,
794 iemAImpl_shld_u64,
795};
796
797/** Function table for the SHRD instruction */
798IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
799{
800 iemAImpl_shrd_u16,
801 iemAImpl_shrd_u32,
802 iemAImpl_shrd_u64,
803};
804
805
806/** Function table for the PUNPCKLBW instruction */
807IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
808/** Function table for the PUNPCKLWD instruction */
809IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
810/** Function table for the PUNPCKLDQ instruction */
811IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
812/** Function table for the PUNPCKLQDQ instruction */
813IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
814
815/** Function table for the PUNPCKHBW instruction */
816IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
817/** Function table for the PUNPCKHWD instruction */
818IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
819/** Function table for the PUNPCKHDQ instruction */
820IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
821/** Function table for the PUNPCKHQDQ instruction */
822IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
823
824/** Function table for the PXOR instruction */
825IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
826/** Function table for the PCMPEQB instruction */
827IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
828/** Function table for the PCMPEQW instruction */
829IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
830/** Function table for the PCMPEQD instruction */
831IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
832
833
834#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
835/** What IEM just wrote. */
836uint8_t g_abIemWrote[256];
837/** How much IEM just wrote. */
838size_t g_cbIemWrote;
839#endif
840
841
842/*********************************************************************************************************************************
843* Internal Functions *
844*********************************************************************************************************************************/
845IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
846IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
847IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
848IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
849/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
850IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
851IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
852IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
853IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
854IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
855IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
856IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
857IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
858IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
859IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
860IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
861IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
862#ifdef IEM_WITH_SETJMP
863DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
864DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
865DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
866DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
868#endif
869
870IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
871IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
872IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
873IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
874IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
875IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
876IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
877IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
878IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
879IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
880IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
881IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
882IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
883IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
884IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
885IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
886
887#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
888IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
889#endif
890IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
891IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
892
893#ifdef VBOX_WITH_NESTED_HWVIRT
894/**
895 * Checks if the intercepted IO instruction causes a \#VMEXIT and handles it
896 * accordingly.
897 *
898 * @returns VBox strict status code.
899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
900 * @param u16Port The IO port being accessed.
901 * @param enmIoType The type of IO access.
902 * @param cbReg The IO operand size in bytes.
903 * @param cAddrSizeBits The address size in bits (16, 32 or 64).
904 * @param iEffSeg The effective segment number.
905 * @param fRep Whether this is a repeating IO instruction (REP prefix).
906 * @param fStrIo Whether this is a string IO instruction.
907 * @param cbInstr The length of the IO instruction in bytes.
908 *
909 * @remarks This must be called only when IO instructions are intercepted by the
910 * nested-guest hypervisor.
911 */
912IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
913 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
914{
915 Assert(IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
916 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
917 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
918
919 static const uint32_t s_auIoOpSize[] = { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
920 static const uint32_t s_auIoAddrSize[] = { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
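 /* Note: cbReg is 1, 2 or 4 (8 is asserted but not expected for IO), so (cbReg & 7)
    picks the 8/16/32-bit entries of the op-size table; cAddrSizeBits is 16, 32 or 64,
    so ((cAddrSizeBits >> 4) & 7) picks indices 1, 2 and 4 of the address-size table. */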
921
922 SVMIOIOEXITINFO IoExitInfo;
923 IoExitInfo.u = s_auIoOpSize[cbReg & 7];
924 IoExitInfo.u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
925 IoExitInfo.n.u1STR = fStrIo;
926 IoExitInfo.n.u1REP = fRep;
927 IoExitInfo.n.u3SEG = iEffSeg & 0x7;
928 IoExitInfo.n.u1Type = enmIoType;
929 IoExitInfo.n.u16Port = u16Port;
930
931 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
932 return HMSvmNstGstHandleIOIntercept(pVCpu, pCtx, &IoExitInfo, pCtx->rip + cbInstr);
933}
934
935#else
936IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
937 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
938{
939 RT_NOREF9(pVCpu, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo, cbInstr);
940 return VERR_IEM_IPE_9;
941}
942#endif /* VBOX_WITH_NESTED_HWVIRT */
943
944
945/**
946 * Sets the pass up status.
947 *
948 * @returns VINF_SUCCESS.
949 * @param pVCpu The cross context virtual CPU structure of the
950 * calling thread.
951 * @param rcPassUp The pass up status. Must be informational.
952 * VINF_SUCCESS is not allowed.
953 */
954IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
955{
956 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
957
958 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
959 if (rcOldPassUp == VINF_SUCCESS)
960 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
961 /* If both are EM scheduling codes, use EM priority rules. */
962 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
963 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
964 {
965 if (rcPassUp < rcOldPassUp)
966 {
967 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
968 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
969 }
970 else
971 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
972 }
973 /* Override EM scheduling with specific status code. */
974 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
975 {
976 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
977 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
978 }
979 /* Don't override specific status code, first come first served. */
980 else
981 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
982 return VINF_SUCCESS;
983}
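/*
 * In short: when both the old and the new status are EM scheduling codes, the
 * numerically lower (higher priority) one is kept; any other informational
 * status that arrived first is never overridden (first come, first served).
 */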
984
985
986/**
987 * Calculates the CPU mode.
988 *
989 * This is mainly for updating IEMCPU::enmCpuMode.
990 *
991 * @returns CPU mode.
992 * @param pCtx The register context for the CPU.
993 */
994DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
995{
996 if (CPUMIsGuestIn64BitCodeEx(pCtx))
997 return IEMMODE_64BIT;
998 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
999 return IEMMODE_32BIT;
1000 return IEMMODE_16BIT;
1001}
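/*
 * The above mirrors the architectural rules: 64-bit mode when long mode is
 * active and CS.L is set (which is what CPUMIsGuestIn64BitCodeEx checks),
 * otherwise CS.D (u1DefBig) selects between 32-bit and 16-bit defaults.
 */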
1002
1003
1004/**
1005 * Initializes the execution state.
1006 *
1007 * @param pVCpu The cross context virtual CPU structure of the
1008 * calling thread.
1009 * @param fBypassHandlers Whether to bypass access handlers.
1010 *
1011 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1012 * side-effects in strict builds.
1013 */
1014DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1015{
1016 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1017
1018 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1019
1020#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1022 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1023 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1025 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1026 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1027 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1028 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1029#endif
1030
1031#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1032 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1033#endif
1034 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1035 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1036#ifdef VBOX_STRICT
1037 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1038 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1039 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1040 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1041 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1042 pVCpu->iem.s.uRexReg = 127;
1043 pVCpu->iem.s.uRexB = 127;
1044 pVCpu->iem.s.uRexIndex = 127;
1045 pVCpu->iem.s.iEffSeg = 127;
1046 pVCpu->iem.s.idxPrefix = 127;
1047 pVCpu->iem.s.uVex3rdReg = 127;
1048 pVCpu->iem.s.uVexLength = 127;
1049 pVCpu->iem.s.fEvexStuff = 127;
1050 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1051# ifdef IEM_WITH_CODE_TLB
1052 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1053 pVCpu->iem.s.pbInstrBuf = NULL;
1054 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1055 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1056 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1057 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1058# else
1059 pVCpu->iem.s.offOpcode = 127;
1060 pVCpu->iem.s.cbOpcode = 127;
1061# endif
1062#endif
1063
1064 pVCpu->iem.s.cActiveMappings = 0;
1065 pVCpu->iem.s.iNextMapping = 0;
1066 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1067 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1068#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1069 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1070 && pCtx->cs.u64Base == 0
1071 && pCtx->cs.u32Limit == UINT32_MAX
1072 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1073 if (!pVCpu->iem.s.fInPatchCode)
1074 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1075#endif
1076
1077#ifdef IEM_VERIFICATION_MODE_FULL
1078 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1079 pVCpu->iem.s.fNoRem = true;
1080#endif
1081}
1082
1083
1084/**
1085 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1086 *
1087 * @param pVCpu The cross context virtual CPU structure of the
1088 * calling thread.
1089 */
1090DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1091{
1092 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1093#ifdef IEM_VERIFICATION_MODE_FULL
1094 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1095#endif
1096#ifdef VBOX_STRICT
1097# ifdef IEM_WITH_CODE_TLB
1098 NOREF(pVCpu);
1099# else
1100 pVCpu->iem.s.cbOpcode = 0;
1101# endif
1102#else
1103 NOREF(pVCpu);
1104#endif
1105}
1106
1107
1108/**
1109 * Initializes the decoder state.
1110 *
1111 * iemReInitDecoder is mostly a copy of this function.
1112 *
1113 * @param pVCpu The cross context virtual CPU structure of the
1114 * calling thread.
1115 * @param fBypassHandlers Whether to bypass access handlers.
1116 */
1117DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1118{
1119 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1120
1121 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1122
1123#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1126 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1128 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1129 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1130 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1131 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1132#endif
1133
1134#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1135 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1136#endif
1137 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1138#ifdef IEM_VERIFICATION_MODE_FULL
1139 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1140 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1141#endif
1142 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1143 pVCpu->iem.s.enmCpuMode = enmMode;
1144 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1145 pVCpu->iem.s.enmEffAddrMode = enmMode;
1146 if (enmMode != IEMMODE_64BIT)
1147 {
1148 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1149 pVCpu->iem.s.enmEffOpSize = enmMode;
1150 }
1151 else
1152 {
1153 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1154 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1155 }
1156 pVCpu->iem.s.fPrefixes = 0;
1157 pVCpu->iem.s.uRexReg = 0;
1158 pVCpu->iem.s.uRexB = 0;
1159 pVCpu->iem.s.uRexIndex = 0;
1160 pVCpu->iem.s.idxPrefix = 0;
1161 pVCpu->iem.s.uVex3rdReg = 0;
1162 pVCpu->iem.s.uVexLength = 0;
1163 pVCpu->iem.s.fEvexStuff = 0;
1164 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1165#ifdef IEM_WITH_CODE_TLB
1166 pVCpu->iem.s.pbInstrBuf = NULL;
1167 pVCpu->iem.s.offInstrNextByte = 0;
1168 pVCpu->iem.s.offCurInstrStart = 0;
1169# ifdef VBOX_STRICT
1170 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1171 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1172 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1173# endif
1174#else
1175 pVCpu->iem.s.offOpcode = 0;
1176 pVCpu->iem.s.cbOpcode = 0;
1177#endif
1178 pVCpu->iem.s.cActiveMappings = 0;
1179 pVCpu->iem.s.iNextMapping = 0;
1180 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1181 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1182#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1183 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1184 && pCtx->cs.u64Base == 0
1185 && pCtx->cs.u32Limit == UINT32_MAX
1186 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1187 if (!pVCpu->iem.s.fInPatchCode)
1188 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1189#endif
1190
1191#ifdef DBGFTRACE_ENABLED
1192 switch (enmMode)
1193 {
1194 case IEMMODE_64BIT:
1195 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1196 break;
1197 case IEMMODE_32BIT:
1198 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1199 break;
1200 case IEMMODE_16BIT:
1201 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1202 break;
1203 }
1204#endif
1205}
1206
1207
1208/**
1209 * Reinitializes the decoder state for the 2nd+ loop iteration of IEMExecLots.
1210 *
1211 * This is mostly a copy of iemInitDecoder.
1212 *
1213 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1214 */
1215DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1216{
1217 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1218
1219 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1220
1221#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1222 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1224 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1226 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1227 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1228 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1229 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1230#endif
1231
1232 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1233#ifdef IEM_VERIFICATION_MODE_FULL
1234 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1235 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1236#endif
1237 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1238 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1239 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1240 pVCpu->iem.s.enmEffAddrMode = enmMode;
1241 if (enmMode != IEMMODE_64BIT)
1242 {
1243 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1244 pVCpu->iem.s.enmEffOpSize = enmMode;
1245 }
1246 else
1247 {
1248 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1249 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1250 }
1251 pVCpu->iem.s.fPrefixes = 0;
1252 pVCpu->iem.s.uRexReg = 0;
1253 pVCpu->iem.s.uRexB = 0;
1254 pVCpu->iem.s.uRexIndex = 0;
1255 pVCpu->iem.s.idxPrefix = 0;
1256 pVCpu->iem.s.uVex3rdReg = 0;
1257 pVCpu->iem.s.uVexLength = 0;
1258 pVCpu->iem.s.fEvexStuff = 0;
1259 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1260#ifdef IEM_WITH_CODE_TLB
1261 if (pVCpu->iem.s.pbInstrBuf)
1262 {
1263 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1264 - pVCpu->iem.s.uInstrBufPc;
1265 if (off < pVCpu->iem.s.cbInstrBufTotal)
1266 {
1267 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1268 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1269 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1270 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1271 else
1272 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1273 }
1274 else
1275 {
1276 pVCpu->iem.s.pbInstrBuf = NULL;
1277 pVCpu->iem.s.offInstrNextByte = 0;
1278 pVCpu->iem.s.offCurInstrStart = 0;
1279 pVCpu->iem.s.cbInstrBuf = 0;
1280 pVCpu->iem.s.cbInstrBufTotal = 0;
1281 }
1282 }
1283 else
1284 {
1285 pVCpu->iem.s.offInstrNextByte = 0;
1286 pVCpu->iem.s.offCurInstrStart = 0;
1287 pVCpu->iem.s.cbInstrBuf = 0;
1288 pVCpu->iem.s.cbInstrBufTotal = 0;
1289 }
1290#else
1291 pVCpu->iem.s.cbOpcode = 0;
1292 pVCpu->iem.s.offOpcode = 0;
1293#endif
1294 Assert(pVCpu->iem.s.cActiveMappings == 0);
1295 pVCpu->iem.s.iNextMapping = 0;
1296 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1297 Assert(pVCpu->iem.s.fBypassHandlers == false);
1298#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1299 if (!pVCpu->iem.s.fInPatchCode)
1300 { /* likely */ }
1301 else
1302 {
1303 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1304 && pCtx->cs.u64Base == 0
1305 && pCtx->cs.u32Limit == UINT32_MAX
1306 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1307 if (!pVCpu->iem.s.fInPatchCode)
1308 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1309 }
1310#endif
1311
1312#ifdef DBGFTRACE_ENABLED
1313 switch (enmMode)
1314 {
1315 case IEMMODE_64BIT:
1316 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1317 break;
1318 case IEMMODE_32BIT:
1319 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1320 break;
1321 case IEMMODE_16BIT:
1322 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1323 break;
1324 }
1325#endif
1326}
1327
1328
1329
1330/**
1331 * Prefetch opcodes the first time, i.e. when starting execution.
1332 *
1333 * @returns Strict VBox status code.
1334 * @param pVCpu The cross context virtual CPU structure of the
1335 * calling thread.
1336 * @param fBypassHandlers Whether to bypass access handlers.
1337 */
1338IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1339{
1340#ifdef IEM_VERIFICATION_MODE_FULL
1341 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1342#endif
1343 iemInitDecoder(pVCpu, fBypassHandlers);
1344
1345#ifdef IEM_WITH_CODE_TLB
1346 /** @todo Do ITLB lookup here. */
1347
1348#else /* !IEM_WITH_CODE_TLB */
1349
1350 /*
1351 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1352 *
1353 * First translate CS:rIP to a physical address.
1354 */
1355 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1356 uint32_t cbToTryRead;
1357 RTGCPTR GCPtrPC;
1358 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1359 {
1360 cbToTryRead = PAGE_SIZE;
1361 GCPtrPC = pCtx->rip;
1362 if (IEM_IS_CANONICAL(GCPtrPC))
1363 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1364 else
1365 return iemRaiseGeneralProtectionFault0(pVCpu);
1366 }
1367 else
1368 {
1369 uint32_t GCPtrPC32 = pCtx->eip;
1370 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1371 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1372 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1373 else
1374 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1375 if (cbToTryRead) { /* likely */ }
1376 else /* overflowed */
1377 {
1378 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1379 cbToTryRead = UINT32_MAX;
1380 }
1381 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1382 Assert(GCPtrPC <= UINT32_MAX);
1383 }
1384
1385# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1386 /* Allow interpretation of patch manager code blocks since they can for
1387 instance throw #PFs for perfectly good reasons. */
1388 if (pVCpu->iem.s.fInPatchCode)
1389 {
1390 size_t cbRead = 0;
1391 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1392 AssertRCReturn(rc, rc);
1393 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1394 return VINF_SUCCESS;
1395 }
1396# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1397
1398 RTGCPHYS GCPhys;
1399 uint64_t fFlags;
1400 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1401 if (RT_SUCCESS(rc)) { /* probable */ }
1402 else
1403 {
1404 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1405 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1406 }
1407 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1408 else
1409 {
1410 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1411 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1412 }
1413 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1414 else
1415 {
1416 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1417 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1418 }
1419 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1420 /** @todo Check reserved bits and such stuff. PGM is better at doing
1421 * that, so do it when implementing the guest virtual address
1422 * TLB... */
1423
1424# ifdef IEM_VERIFICATION_MODE_FULL
1425 /*
1426 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1427 * instruction.
1428 */
1429 /** @todo optimize this differently by not using PGMPhysRead. */
1430 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1431 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1432 if ( offPrevOpcodes < cbOldOpcodes
1433 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1434 {
1435 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1436 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1437 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1438 pVCpu->iem.s.cbOpcode = cbNew;
1439 return VINF_SUCCESS;
1440 }
1441# endif
1442
1443 /*
1444 * Read the bytes at this address.
1445 */
1446 PVM pVM = pVCpu->CTX_SUFF(pVM);
1447# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1448 size_t cbActual;
1449 if ( PATMIsEnabled(pVM)
1450 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1451 {
1452 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1453 Assert(cbActual > 0);
1454 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1455 }
1456 else
1457# endif
1458 {
1459 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1460 if (cbToTryRead > cbLeftOnPage)
1461 cbToTryRead = cbLeftOnPage;
1462 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1463 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1464
1465 if (!pVCpu->iem.s.fBypassHandlers)
1466 {
1467 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1468 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1469 { /* likely */ }
1470 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1471 {
1472 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1473 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1474 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1475 }
1476 else
1477 {
1478 Log((RT_SUCCESS(rcStrict)
1479 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1480 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1481 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1482 return rcStrict;
1483 }
1484 }
1485 else
1486 {
1487 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1488 if (RT_SUCCESS(rc))
1489 { /* likely */ }
1490 else
1491 {
1492 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1493 GCPtrPC, GCPhys, cbToTryRead, rc));
1494 return rc;
1495 }
1496 }
1497 pVCpu->iem.s.cbOpcode = cbToTryRead;
1498 }
1499#endif /* !IEM_WITH_CODE_TLB */
1500 return VINF_SUCCESS;
1501}
1502
1503
1504/**
1505 * Invalidates the IEM TLBs.
1506 *
1507 * This is called internally as well as by PGM when moving GC mappings.
1508 *
1510 * @param pVCpu The cross context virtual CPU structure of the calling
1511 * thread.
1512 * @param fVmm Set when PGM calls us with a remapping.
1513 */
1514VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1515{
1516#ifdef IEM_WITH_CODE_TLB
1517 pVCpu->iem.s.cbInstrBufTotal = 0;
1518 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1519 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1520 { /* very likely */ }
1521 else
1522 {
1523 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1524 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1525 while (i-- > 0)
1526 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1527 }
1528#endif
1529
1530#ifdef IEM_WITH_DATA_TLB
1531 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1532 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1533 { /* very likely */ }
1534 else
1535 {
1536 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1537 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1538 while (i-- > 0)
1539 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1540 }
1541#endif
1542 NOREF(pVCpu); NOREF(fVmm);
1543}
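
/*
 * Illustrative sketch (not part of the build): the revision trick used above.
 * Because the current TLB revision is baked into every entry tag, a full
 * invalidation is an O(1) bump of the revision counter; only the very rare
 * wrap-around has to touch all entries.  The type and helper names below are
 * simplified assumptions for illustration only.
 */
#if 0
typedef struct EXAMPLETLB
{
    uint64_t uTlbRevision;              /* Occupies bits above the page-number tag; bumped to invalidate. */
    struct { uint64_t uTag; } aEntries[256];
} EXAMPLETLB;

/* Lookup only hits if the entry was inserted under the current revision. */
static bool exampleTlbIsHit(EXAMPLETLB const *pTlb, uint64_t GCPtr)
{
    uint64_t const uTag = (GCPtr >> X86_PAGE_SHIFT) | pTlb->uTlbRevision;
    return pTlb->aEntries[(uint8_t)uTag].uTag == uTag;
}

/* Invalidate everything: one addition, plus a rare wrap-around slow path that
   clears the tags so stale entries cannot alias the restarted revision. */
static void exampleTlbInvalidateAll(EXAMPLETLB *pTlb)
{
    pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
    if (!pTlb->uTlbRevision)
    {
        pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
        for (unsigned i = 0; i < RT_ELEMENTS(pTlb->aEntries); i++)
            pTlb->aEntries[i].uTag = 0;
    }
}
#endif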
1544
1545
1546/**
1547 * Invalidates a page in the TLBs.
1548 *
1549 * @param pVCpu The cross context virtual CPU structure of the calling
1550 * thread.
1551 * @param GCPtr The address of the page to invalidate.
1552 */
1553VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1554{
1555#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1556 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1557 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1558 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1559 uintptr_t idx = (uint8_t)GCPtr;
1560
1561# ifdef IEM_WITH_CODE_TLB
1562 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1563 {
1564 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1565 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1566 pVCpu->iem.s.cbInstrBufTotal = 0;
1567 }
1568# endif
1569
1570# ifdef IEM_WITH_DATA_TLB
1571 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1572 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1573# endif
1574#else
1575 NOREF(pVCpu); NOREF(GCPtr);
1576#endif
1577}
1578
1579
1580/**
1581 * Invalidates the host physical aspects of the IEM TLBs.
1582 *
1583 * This is called internally as well as by PGM when moving GC mappings.
1584 *
1585 * @param pVCpu The cross context virtual CPU structure of the calling
1586 * thread.
1587 */
1588VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1589{
1590#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1591 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1592
1593# ifdef IEM_WITH_CODE_TLB
1594 pVCpu->iem.s.cbInstrBufTotal = 0;
1595# endif
1596 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1597 if (uTlbPhysRev != 0)
1598 {
1599 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1600 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1601 }
1602 else
1603 {
1604 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1605 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1606
1607 unsigned i;
1608# ifdef IEM_WITH_CODE_TLB
1609 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1610 while (i-- > 0)
1611 {
1612 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1613 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1614 }
1615# endif
1616# ifdef IEM_WITH_DATA_TLB
1617 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1618 while (i-- > 0)
1619 {
1620 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1621 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1622 }
1623# endif
1624 }
1625#else
1626 NOREF(pVCpu);
1627#endif
1628}
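
/*
 * Illustrative sketch (not part of the build): how fFlagsAndPhysRev is meant
 * to be consumed after a physical invalidation.  The low bits hold IEMTLBE_F_*
 * access flags and the high bits hold the physical revision, so one mask-and-
 * compare both verifies the revision and checks that no "can't do this" flag
 * is set.  The helper name below is made up for the example.
 */
#if 0
static bool exampleCanReadDirectly(uint64_t fFlagsAndPhysRev, uint64_t uTlbPhysRev)
{
    /* Equal only if the revision matches exactly and neither NO_MAPPINGR3 nor
       PG_NO_READ is set, mirroring the check in iemOpcodeFetchBytesJmp. */
    return (fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
        == uTlbPhysRev;
}
#endif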
1629
1630
1631/**
1632 * Invalidates the host physical aspects of the IEM TLBs on all virtual CPUs.
1633 *
1634 * This is called internally as well as by PGM when moving GC mappings.
1635 *
1636 * @param pVM The cross context VM structure.
1637 *
1638 * @remarks Caller holds the PGM lock.
1639 */
1640VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1641{
1642 RT_NOREF_PV(pVM);
1643}
1644
1645#ifdef IEM_WITH_CODE_TLB
1646
1647/**
1648 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1649 * longjmp'ing on failure.
1650 *
1651 * We end up here for a number of reasons:
1652 * - pbInstrBuf isn't yet initialized.
1653 * - Advancing beyond the buffer boundary (e.g. cross page).
1654 * - Advancing beyond the CS segment limit.
1655 * - Fetching from a non-mappable page (e.g. MMIO).
1656 *
1657 * @param pVCpu The cross context virtual CPU structure of the
1658 * calling thread.
1659 * @param pvDst Where to return the bytes.
1660 * @param cbDst Number of bytes to read.
1661 *
1662 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1663 */
1664IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1665{
1666#ifdef IN_RING3
1667//__debugbreak();
1668 for (;;)
1669 {
1670 Assert(cbDst <= 8);
1671 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1672
1673 /*
1674 * We might have a partial buffer match, deal with that first to make the
1675 * rest simpler. This is the first part of the cross page/buffer case.
1676 */
1677 if (pVCpu->iem.s.pbInstrBuf != NULL)
1678 {
1679 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1680 {
1681 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1682 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1683 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1684
1685 cbDst -= cbCopy;
1686 pvDst = (uint8_t *)pvDst + cbCopy;
1687 offBuf += cbCopy;
1688 pVCpu->iem.s.offInstrNextByte = offBuf;
1689 }
1690 }
1691
1692 /*
1693 * Check segment limit, figuring how much we're allowed to access at this point.
1694 *
1695 * We will fault immediately if RIP is past the segment limit / in non-canonical
1696 * territory. If we do continue, there are one or more bytes to read before we
1697 * end up in trouble and we need to do that first before faulting.
1698 */
1699 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1700 RTGCPTR GCPtrFirst;
1701 uint32_t cbMaxRead;
1702 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1703 {
1704 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1705 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1706 { /* likely */ }
1707 else
1708 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1709 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1710 }
1711 else
1712 {
1713 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1714 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1715 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1716 { /* likely */ }
1717 else
1718 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1719 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1720 if (cbMaxRead != 0)
1721 { /* likely */ }
1722 else
1723 {
1724 /* Overflowed because address is 0 and limit is max. */
1725 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1726 cbMaxRead = X86_PAGE_SIZE;
1727 }
1728 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1729 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1730 if (cbMaxRead2 < cbMaxRead)
1731 cbMaxRead = cbMaxRead2;
1732 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1733 }
1734
1735 /*
1736 * Get the TLB entry for this piece of code.
1737 */
1738 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1739 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1740 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1741 if (pTlbe->uTag == uTag)
1742 {
1743 /* likely when executing lots of code, otherwise unlikely */
1744# ifdef VBOX_WITH_STATISTICS
1745 pVCpu->iem.s.CodeTlb.cTlbHits++;
1746# endif
1747 }
1748 else
1749 {
1750 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1751# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1752 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1753 {
1754 pTlbe->uTag = uTag;
1755 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1756 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1757 pTlbe->GCPhys = NIL_RTGCPHYS;
1758 pTlbe->pbMappingR3 = NULL;
1759 }
1760 else
1761# endif
1762 {
1763 RTGCPHYS GCPhys;
1764 uint64_t fFlags;
1765 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1766 if (RT_FAILURE(rc))
1767 {
1768 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1769 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1770 }
1771
1772 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1773 pTlbe->uTag = uTag;
1774 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1775 pTlbe->GCPhys = GCPhys;
1776 pTlbe->pbMappingR3 = NULL;
1777 }
1778 }
1779
1780 /*
1781 * Check TLB page table level access flags.
1782 */
1783 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1784 {
1785 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1786 {
1787 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1788 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1789 }
1790 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1791 {
1792 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1793 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1794 }
1795 }
1796
1797# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1798 /*
1799 * Allow interpretation of patch manager code blocks since they can for
1800 * instance throw #PFs for perfectly good reasons.
1801 */
1802 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1803 { /* not unlikely */ }
1804 else
1805 {
1806 /** @todo Could optimize this a little in ring-3 if we liked. */
1807 size_t cbRead = 0;
1808 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1809 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1810 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1811 return;
1812 }
1813# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1814
1815 /*
1816 * Look up the physical page info if necessary.
1817 */
1818 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1819 { /* not necessary */ }
1820 else
1821 {
1822 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1823 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1824 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1825 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1826 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1827 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1828 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1829 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1830 }
1831
1832# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1833 /*
1834 * Try do a direct read using the pbMappingR3 pointer.
1835 */
1836 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1837 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1838 {
1839 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1840 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1841 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1842 {
1843 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1844 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1845 }
1846 else
1847 {
1848 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1849 Assert(cbInstr < cbMaxRead);
1850 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1851 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1852 }
1853 if (cbDst <= cbMaxRead)
1854 {
1855 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1856 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1857 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1858 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1859 return;
1860 }
1861 pVCpu->iem.s.pbInstrBuf = NULL;
1862
1863 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1864 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1865 }
1866 else
1867# endif
1868#if 0
1869 /*
1870 * If there is no special read handling, we can read a bit more and
1871 * put it in the prefetch buffer.
1872 */
1873 if ( cbDst < cbMaxRead
1874 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1875 {
1876 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1877 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1878 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1879 { /* likely */ }
1880 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1881 {
1882 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1883 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1884 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1885 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1886 }
1887 else
1888 {
1889 Log((RT_SUCCESS(rcStrict)
1890 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1891 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1892 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1893 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1894 }
1895 }
1896 /*
1897 * Special read handling, so only read exactly what's needed.
1898 * This is a highly unlikely scenario.
1899 */
1900 else
1901#endif
1902 {
1903 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1904 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1905 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1906 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1907 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1908 { /* likely */ }
1909 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1910 {
1911 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1912 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1913 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1914 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1915 }
1916 else
1917 {
1918 Log((RT_SUCCESS(rcStrict)
1919 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1920 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1921 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1922 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1923 }
1924 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1925 if (cbToRead == cbDst)
1926 return;
1927 }
1928
1929 /*
1930 * More to read, loop.
1931 */
1932 cbDst -= cbMaxRead;
1933 pvDst = (uint8_t *)pvDst + cbMaxRead;
1934 }
1935#else
1936 RT_NOREF(pvDst, cbDst);
1937 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1938#endif
1939}
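
/*
 * Illustrative sketch (not part of the build): the page-by-page loop shape of
 * the fetcher above.  Each iteration is clamped to the distance to the next
 * page boundary, so a fetch that straddles a page becomes two (or more) reads.
 * exampleReadOnePage is a hypothetical stand-in for the translate+read work.
 */
#if 0
static void exampleReadOnePage(uint64_t GCPtr, void *pvDst, uint32_t cbChunk); /* hypothetical */

static void exampleReadAcrossPages(uint64_t GCPtr, void *pvDst, size_t cbDst)
{
    while (cbDst > 0)
    {
        uint32_t const cbChunk = RT_MIN((uint32_t)cbDst,
                                        X86_PAGE_SIZE - ((uint32_t)GCPtr & X86_PAGE_OFFSET_MASK));
        exampleReadOnePage(GCPtr, pvDst, cbChunk);      /* translate + copy one page's worth */
        GCPtr  += cbChunk;
        pvDst   = (uint8_t *)pvDst + cbChunk;
        cbDst  -= cbChunk;
    }
}
#endif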
1940
1941#else
1942
1943/**
1944 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1945 * exception if it fails.
1946 *
1947 * @returns Strict VBox status code.
1948 * @param pVCpu The cross context virtual CPU structure of the
1949 * calling thread.
1950 * @param cbMin The minimum number of bytes relative to offOpcode
1951 * that must be read.
1952 */
1953IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1954{
1955 /*
1956 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1957 *
1958 * First translate CS:rIP to a physical address.
1959 */
1960 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1961 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1962 uint32_t cbToTryRead;
1963 RTGCPTR GCPtrNext;
1964 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1965 {
1966 cbToTryRead = PAGE_SIZE;
1967 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1968 if (!IEM_IS_CANONICAL(GCPtrNext))
1969 return iemRaiseGeneralProtectionFault0(pVCpu);
1970 }
1971 else
1972 {
1973 uint32_t GCPtrNext32 = pCtx->eip;
1974 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1975 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1976 if (GCPtrNext32 > pCtx->cs.u32Limit)
1977 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1978 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1979 if (!cbToTryRead) /* overflowed */
1980 {
1981 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1982 cbToTryRead = UINT32_MAX;
1983 /** @todo check out wrapping around the code segment. */
1984 }
1985 if (cbToTryRead < cbMin - cbLeft)
1986 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1987 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1988 }
1989
1990 /* Only read up to the end of the page, and make sure we don't read more
1991 than the opcode buffer can hold. */
1992 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1993 if (cbToTryRead > cbLeftOnPage)
1994 cbToTryRead = cbLeftOnPage;
1995 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1996 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1997/** @todo r=bird: Convert assertion into undefined opcode exception? */
1998 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1999
2000# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2001 /* Allow interpretation of patch manager code blocks since they can for
2002 instance throw #PFs for perfectly good reasons. */
2003 if (pVCpu->iem.s.fInPatchCode)
2004 {
2005 size_t cbRead = 0;
2006 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2007 AssertRCReturn(rc, rc);
2008 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2009 return VINF_SUCCESS;
2010 }
2011# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2012
2013 RTGCPHYS GCPhys;
2014 uint64_t fFlags;
2015 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2016 if (RT_FAILURE(rc))
2017 {
2018 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2019 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2020 }
2021 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2022 {
2023 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2024 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2025 }
2026 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2027 {
2028 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2029 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2030 }
2031 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2032 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2033 /** @todo Check reserved bits and such stuff. PGM is better at doing
2034 * that, so do it when implementing the guest virtual address
2035 * TLB... */
2036
2037 /*
2038 * Read the bytes at this address.
2039 *
2040 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2041 * and since PATM should only patch the start of an instruction there
2042 * should be no need to check again here.
2043 */
2044 if (!pVCpu->iem.s.fBypassHandlers)
2045 {
2046 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2047 cbToTryRead, PGMACCESSORIGIN_IEM);
2048 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2049 { /* likely */ }
2050 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2051 {
2052 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2053 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2054 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2055 }
2056 else
2057 {
2058 Log((RT_SUCCESS(rcStrict)
2059 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2060 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2061 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2062 return rcStrict;
2063 }
2064 }
2065 else
2066 {
2067 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2068 if (RT_SUCCESS(rc))
2069 { /* likely */ }
2070 else
2071 {
2072 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2073 return rc;
2074 }
2075 }
2076 pVCpu->iem.s.cbOpcode += cbToTryRead;
2077 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2078
2079 return VINF_SUCCESS;
2080}
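
/*
 * Illustrative sketch (not part of the build): the CS limit arithmetic used in
 * the 16/32-bit branch above.  "u32Limit - eip + 1" is the number of bytes
 * left in the segment, and its wrap to 0 for a flat segment (eip 0, limit
 * 0xffffffff) is why a zero result is treated as "the whole address space".
 */
#if 0
static uint32_t exampleBytesLeftInCs(uint32_t uEip, uint32_t u32Limit)
{
    uint32_t cbLeft = u32Limit - uEip + 1;      /* the limit is inclusive */
    if (!cbLeft)                                /* only when uEip == 0 && u32Limit == UINT32_MAX */
        cbLeft = UINT32_MAX;
    return cbLeft;
}
#endif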
2081
2082#endif /* !IEM_WITH_CODE_TLB */
2083#ifndef IEM_WITH_SETJMP
2084
2085/**
2086 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2087 *
2088 * @returns Strict VBox status code.
2089 * @param pVCpu The cross context virtual CPU structure of the
2090 * calling thread.
2091 * @param pb Where to return the opcode byte.
2092 */
2093DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2094{
2095 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2096 if (rcStrict == VINF_SUCCESS)
2097 {
2098 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2099 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2100 pVCpu->iem.s.offOpcode = offOpcode + 1;
2101 }
2102 else
2103 *pb = 0;
2104 return rcStrict;
2105}
2106
2107
2108/**
2109 * Fetches the next opcode byte.
2110 *
2111 * @returns Strict VBox status code.
2112 * @param pVCpu The cross context virtual CPU structure of the
2113 * calling thread.
2114 * @param pu8 Where to return the opcode byte.
2115 */
2116DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2117{
2118 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2119 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2120 {
2121 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2122 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2123 return VINF_SUCCESS;
2124 }
2125 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2126}
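
/*
 * Illustrative sketch (not part of the build): the fast/slow split used by the
 * opcode fetchers above.  The common in-buffer case stays in a tiny DECLINLINE
 * body while the refill case is forced out of line (DECL_NO_INLINE) so it does
 * not bloat every call site.  The names below are made up for the example.
 */
#if 0
DECL_NO_INLINE(static, int) exampleFetchSlow(uint8_t *pabBuf, uint32_t *poff, uint8_t *pb)
{
    /* ... refill pabBuf here, then hand out the byte ... */
    *pb = pabBuf[(*poff)++];
    return VINF_SUCCESS;
}

DECLINLINE(int) exampleFetchFast(uint8_t *pabBuf, uint32_t cbBuf, uint32_t *poff, uint8_t *pb)
{
    uint32_t const off = *poff;
    if (RT_LIKELY(off < cbBuf))                 /* the overwhelmingly common case */
    {
        *poff = off + 1;
        *pb   = pabBuf[off];
        return VINF_SUCCESS;
    }
    return exampleFetchSlow(pabBuf, poff, pb);  /* rare: out-of-line refill */
}
#endif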
2127
2128#else /* IEM_WITH_SETJMP */
2129
2130/**
2131 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2132 *
2133 * @returns The opcode byte.
2134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2135 */
2136DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2137{
2138# ifdef IEM_WITH_CODE_TLB
2139 uint8_t u8;
2140 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2141 return u8;
2142# else
2143 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2144 if (rcStrict == VINF_SUCCESS)
2145 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2146 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2147# endif
2148}
2149
2150
2151/**
2152 * Fetches the next opcode byte, longjmp on error.
2153 *
2154 * @returns The opcode byte.
2155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2156 */
2157DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2158{
2159# ifdef IEM_WITH_CODE_TLB
2160 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2161 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2162 if (RT_LIKELY( pbBuf != NULL
2163 && offBuf < pVCpu->iem.s.cbInstrBuf))
2164 {
2165 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2166 return pbBuf[offBuf];
2167 }
2168# else
2169 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2170 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2171 {
2172 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2173 return pVCpu->iem.s.abOpcode[offOpcode];
2174 }
2175# endif
2176 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2177}
2178
2179#endif /* IEM_WITH_SETJMP */
2180
2181/**
2182 * Fetches the next opcode byte, returns automatically on failure.
2183 *
2184 * @param a_pu8 Where to return the opcode byte.
2185 * @remark Implicitly references pVCpu.
2186 */
2187#ifndef IEM_WITH_SETJMP
2188# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2189 do \
2190 { \
2191 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2192 if (rcStrict2 == VINF_SUCCESS) \
2193 { /* likely */ } \
2194 else \
2195 return rcStrict2; \
2196 } while (0)
2197#else
2198# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2199#endif /* IEM_WITH_SETJMP */
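
/*
 * Illustrative sketch (not part of the build): how a decoder helper is expected
 * to consume IEM_OPCODE_GET_NEXT_U8.  iemOpExample_GrabByte is not a real IEM
 * function; it only shows that the macro either fills in the byte or makes the
 * *caller* return (or longjmp), which is why the caller needs a VBOXSTRICTRC
 * compatible return type in the non-setjmp build.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemOpExample_GrabByte(PVMCPU pVCpu)
{
    uint8_t bByte;
    IEM_OPCODE_GET_NEXT_U8(&bByte);     /* returns/longjmps on fetch failure */
    Log4(("iemOpExample_GrabByte: %#x\n", bByte));
    return VINF_SUCCESS;
}
#endif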
2200
2201
2202#ifndef IEM_WITH_SETJMP
2203/**
2204 * Fetches the next signed byte from the opcode stream.
2205 *
2206 * @returns Strict VBox status code.
2207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2208 * @param pi8 Where to return the signed byte.
2209 */
2210DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2211{
2212 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2213}
2214#endif /* !IEM_WITH_SETJMP */
2215
2216
2217/**
2218 * Fetches the next signed byte from the opcode stream, returning automatically
2219 * on failure.
2220 *
2221 * @param a_pi8 Where to return the signed byte.
2222 * @remark Implicitly references pVCpu.
2223 */
2224#ifndef IEM_WITH_SETJMP
2225# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2226 do \
2227 { \
2228 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2229 if (rcStrict2 != VINF_SUCCESS) \
2230 return rcStrict2; \
2231 } while (0)
2232#else /* IEM_WITH_SETJMP */
2233# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2234
2235#endif /* IEM_WITH_SETJMP */
2236
2237#ifndef IEM_WITH_SETJMP
2238
2239/**
2240 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2241 *
2242 * @returns Strict VBox status code.
2243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2244 * @param pu16 Where to return the opcode word.
2245 */
2246DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2247{
2248 uint8_t u8;
2249 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2250 if (rcStrict == VINF_SUCCESS)
2251 *pu16 = (int8_t)u8;
2252 return rcStrict;
2253}
2254
2255
2256/**
2257 * Fetches the next signed byte from the opcode stream, extending it to
2258 * unsigned 16-bit.
2259 *
2260 * @returns Strict VBox status code.
2261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2262 * @param pu16 Where to return the unsigned word.
2263 */
2264DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2265{
2266 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2267 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2268 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2269
2270 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2271 pVCpu->iem.s.offOpcode = offOpcode + 1;
2272 return VINF_SUCCESS;
2273}
2274
2275#endif /* !IEM_WITH_SETJMP */
2276
2277/**
2278 * Fetches the next signed byte from the opcode stream, sign-extending it to
2279 * a word and returning automatically on failure.
2280 *
2281 * @param a_pu16 Where to return the word.
2282 * @remark Implicitly references pVCpu.
2283 */
2284#ifndef IEM_WITH_SETJMP
2285# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2286 do \
2287 { \
2288 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2289 if (rcStrict2 != VINF_SUCCESS) \
2290 return rcStrict2; \
2291 } while (0)
2292#else
2293# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2294#endif
2295
2296#ifndef IEM_WITH_SETJMP
2297
2298/**
2299 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2300 *
2301 * @returns Strict VBox status code.
2302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2303 * @param pu32 Where to return the opcode dword.
2304 */
2305DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2306{
2307 uint8_t u8;
2308 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2309 if (rcStrict == VINF_SUCCESS)
2310 *pu32 = (int8_t)u8;
2311 return rcStrict;
2312}
2313
2314
2315/**
2316 * Fetches the next signed byte from the opcode stream, extending it to
2317 * unsigned 32-bit.
2318 *
2319 * @returns Strict VBox status code.
2320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2321 * @param pu32 Where to return the unsigned dword.
2322 */
2323DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2324{
2325 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2326 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2327 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2328
2329 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2330 pVCpu->iem.s.offOpcode = offOpcode + 1;
2331 return VINF_SUCCESS;
2332}
2333
2334#endif /* !IEM_WITH_SETJMP */
2335
2336/**
2337 * Fetches the next signed byte from the opcode stream, sign-extending it to
2338 * a double word and returning automatically on failure.
2339 *
2340 * @param a_pu32 Where to return the double word.
2341 * @remark Implicitly references pVCpu.
2342 */
2343#ifndef IEM_WITH_SETJMP
2344#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2345 do \
2346 { \
2347 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2348 if (rcStrict2 != VINF_SUCCESS) \
2349 return rcStrict2; \
2350 } while (0)
2351#else
2352# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2353#endif
2354
2355#ifndef IEM_WITH_SETJMP
2356
2357/**
2358 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2359 *
2360 * @returns Strict VBox status code.
2361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2362 * @param pu64 Where to return the opcode qword.
2363 */
2364DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2365{
2366 uint8_t u8;
2367 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2368 if (rcStrict == VINF_SUCCESS)
2369 *pu64 = (int8_t)u8;
2370 return rcStrict;
2371}
2372
2373
2374/**
2375 * Fetches the next signed byte from the opcode stream, extending it to
2376 * unsigned 64-bit.
2377 *
2378 * @returns Strict VBox status code.
2379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2380 * @param pu64 Where to return the unsigned qword.
2381 */
2382DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2383{
2384 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2385 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2386 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2387
2388 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2389 pVCpu->iem.s.offOpcode = offOpcode + 1;
2390 return VINF_SUCCESS;
2391}
2392
2393#endif /* !IEM_WITH_SETJMP */
2394
2395
2396/**
2397 * Fetches the next signed byte from the opcode stream, sign-extending it to
2398 * a quad word and returning automatically on failure.
2399 *
2400 * @param a_pu64 Where to return the quad word.
2401 * @remark Implicitly references pVCpu.
2402 */
2403#ifndef IEM_WITH_SETJMP
2404# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2405 do \
2406 { \
2407 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2408 if (rcStrict2 != VINF_SUCCESS) \
2409 return rcStrict2; \
2410 } while (0)
2411#else
2412# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2413#endif
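
/*
 * Illustrative sketch (not part of the build): why the S8_SX_* variants cast
 * via int8_t before widening.  The byte is a signed displacement/immediate, so
 * the widening must replicate the sign bit; the values below are examples only.
 */
#if 0
static void exampleSignExtendByte(void)
{
    uint8_t const bDisp = 0xfe;                     /* encodes -2 */
    uint64_t const uSx  = (int8_t)bDisp;            /* 0xfffffffffffffffe */
    uint64_t const uZx  = bDisp;                    /* 0x00000000000000fe - zero extension, wrong here */
    Assert(uSx == UINT64_C(0xfffffffffffffffe));
    Assert(uZx == UINT64_C(0x00000000000000fe));
}
#endif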
2414
2415
2416#ifndef IEM_WITH_SETJMP
2417
2418/**
2419 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2420 *
2421 * @returns Strict VBox status code.
2422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2423 * @param pu16 Where to return the opcode word.
2424 */
2425DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2426{
2427 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2428 if (rcStrict == VINF_SUCCESS)
2429 {
2430 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2431# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2432 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2433# else
2434 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2435# endif
2436 pVCpu->iem.s.offOpcode = offOpcode + 2;
2437 }
2438 else
2439 *pu16 = 0;
2440 return rcStrict;
2441}
2442
2443
2444/**
2445 * Fetches the next opcode word.
2446 *
2447 * @returns Strict VBox status code.
2448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2449 * @param pu16 Where to return the opcode word.
2450 */
2451DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2452{
2453 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2454 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2455 {
2456 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2457# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2458 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2459# else
2460 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2461# endif
2462 return VINF_SUCCESS;
2463 }
2464 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2465}
2466
2467#else /* IEM_WITH_SETJMP */
2468
2469/**
2470 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2471 *
2472 * @returns The opcode word.
2473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2474 */
2475DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2476{
2477# ifdef IEM_WITH_CODE_TLB
2478 uint16_t u16;
2479 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2480 return u16;
2481# else
2482 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2483 if (rcStrict == VINF_SUCCESS)
2484 {
2485 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2486 pVCpu->iem.s.offOpcode += 2;
2487# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2488 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2489# else
2490 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2491# endif
2492 }
2493 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2494# endif
2495}
2496
2497
2498/**
2499 * Fetches the next opcode word, longjmp on error.
2500 *
2501 * @returns The opcode word.
2502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2503 */
2504DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2505{
2506# ifdef IEM_WITH_CODE_TLB
2507 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2508 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2509 if (RT_LIKELY( pbBuf != NULL
2510 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2511 {
2512 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2513# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2514 return *(uint16_t const *)&pbBuf[offBuf];
2515# else
2516 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2517# endif
2518 }
2519# else
2520 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2521 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2522 {
2523 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2524# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2525 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2526# else
2527 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2528# endif
2529 }
2530# endif
2531 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2532}
2533
2534#endif /* IEM_WITH_SETJMP */
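
/*
 * Illustrative sketch (not part of the build): the two u16 fetch variants above
 * yield the same value because x86 opcode immediates are little endian (and the
 * unaligned variant additionally assumes a little-endian host that tolerates
 * misaligned loads, as the surrounding code does).  With abOpcode[] starting
 * 0x34 0x12, both forms produce 0x1234.
 */
#if 0
static uint16_t exampleFetchU16(uint8_t const *pabOpcode)
{
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    return *(uint16_t const *)pabOpcode;            /* direct misaligned load */
# else
    return RT_MAKE_U16(pabOpcode[0], pabOpcode[1]); /* pabOpcode[0] | (pabOpcode[1] << 8) */
# endif
}
#endif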
2535
2536
2537/**
2538 * Fetches the next opcode word, returns automatically on failure.
2539 *
2540 * @param a_pu16 Where to return the opcode word.
2541 * @remark Implicitly references pVCpu.
2542 */
2543#ifndef IEM_WITH_SETJMP
2544# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2545 do \
2546 { \
2547 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2548 if (rcStrict2 != VINF_SUCCESS) \
2549 return rcStrict2; \
2550 } while (0)
2551#else
2552# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2553#endif
2554
2555#ifndef IEM_WITH_SETJMP
2556
2557/**
2558 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2559 *
2560 * @returns Strict VBox status code.
2561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2562 * @param pu32 Where to return the opcode double word.
2563 */
2564DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2565{
2566 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2567 if (rcStrict == VINF_SUCCESS)
2568 {
2569 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2570 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2571 pVCpu->iem.s.offOpcode = offOpcode + 2;
2572 }
2573 else
2574 *pu32 = 0;
2575 return rcStrict;
2576}
2577
2578
2579/**
2580 * Fetches the next opcode word, zero extending it to a double word.
2581 *
2582 * @returns Strict VBox status code.
2583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2584 * @param pu32 Where to return the opcode double word.
2585 */
2586DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2587{
2588 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2589 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2590 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2591
2592 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2593 pVCpu->iem.s.offOpcode = offOpcode + 2;
2594 return VINF_SUCCESS;
2595}
2596
2597#endif /* !IEM_WITH_SETJMP */
2598
2599
2600/**
2601 * Fetches the next opcode word and zero extends it to a double word, returns
2602 * automatically on failure.
2603 *
2604 * @param a_pu32 Where to return the opcode double word.
2605 * @remark Implicitly references pVCpu.
2606 */
2607#ifndef IEM_WITH_SETJMP
2608# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2609 do \
2610 { \
2611 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2612 if (rcStrict2 != VINF_SUCCESS) \
2613 return rcStrict2; \
2614 } while (0)
2615#else
2616# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2617#endif
2618
2619#ifndef IEM_WITH_SETJMP
2620
2621/**
2622 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2623 *
2624 * @returns Strict VBox status code.
2625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2626 * @param pu64 Where to return the opcode quad word.
2627 */
2628DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2629{
2630 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2631 if (rcStrict == VINF_SUCCESS)
2632 {
2633 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2634 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2635 pVCpu->iem.s.offOpcode = offOpcode + 2;
2636 }
2637 else
2638 *pu64 = 0;
2639 return rcStrict;
2640}
2641
2642
2643/**
2644 * Fetches the next opcode word, zero extending it to a quad word.
2645 *
2646 * @returns Strict VBox status code.
2647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2648 * @param pu64 Where to return the opcode quad word.
2649 */
2650DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2651{
2652 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2653 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2654 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2655
2656 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2657 pVCpu->iem.s.offOpcode = offOpcode + 2;
2658 return VINF_SUCCESS;
2659}
2660
2661#endif /* !IEM_WITH_SETJMP */
2662
2663/**
2664 * Fetches the next opcode word and zero extends it to a quad word, returns
2665 * automatically on failure.
2666 *
2667 * @param a_pu64 Where to return the opcode quad word.
2668 * @remark Implicitly references pVCpu.
2669 */
2670#ifndef IEM_WITH_SETJMP
2671# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2672 do \
2673 { \
2674 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2675 if (rcStrict2 != VINF_SUCCESS) \
2676 return rcStrict2; \
2677 } while (0)
2678#else
2679# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2680#endif
2681
2682
2683#ifndef IEM_WITH_SETJMP
2684/**
2685 * Fetches the next signed word from the opcode stream.
2686 *
2687 * @returns Strict VBox status code.
2688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2689 * @param pi16 Where to return the signed word.
2690 */
2691DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2692{
2693 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2694}
2695#endif /* !IEM_WITH_SETJMP */
2696
2697
2698/**
2699 * Fetches the next signed word from the opcode stream, returning automatically
2700 * on failure.
2701 *
2702 * @param a_pi16 Where to return the signed word.
2703 * @remark Implicitly references pVCpu.
2704 */
2705#ifndef IEM_WITH_SETJMP
2706# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2707 do \
2708 { \
2709 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2710 if (rcStrict2 != VINF_SUCCESS) \
2711 return rcStrict2; \
2712 } while (0)
2713#else
2714# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2715#endif
2716
2717#ifndef IEM_WITH_SETJMP
2718
2719/**
2720 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2721 *
2722 * @returns Strict VBox status code.
2723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2724 * @param pu32 Where to return the opcode dword.
2725 */
2726DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2727{
2728 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2729 if (rcStrict == VINF_SUCCESS)
2730 {
2731 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2732# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2733 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2734# else
2735 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2736 pVCpu->iem.s.abOpcode[offOpcode + 1],
2737 pVCpu->iem.s.abOpcode[offOpcode + 2],
2738 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2739# endif
2740 pVCpu->iem.s.offOpcode = offOpcode + 4;
2741 }
2742 else
2743 *pu32 = 0;
2744 return rcStrict;
2745}
2746
2747
2748/**
2749 * Fetches the next opcode dword.
2750 *
2751 * @returns Strict VBox status code.
2752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2753 * @param pu32 Where to return the opcode double word.
2754 */
2755DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2756{
2757 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2758 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2759 {
2760 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2761# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2762 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2763# else
2764 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2765 pVCpu->iem.s.abOpcode[offOpcode + 1],
2766 pVCpu->iem.s.abOpcode[offOpcode + 2],
2767 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2768# endif
2769 return VINF_SUCCESS;
2770 }
2771 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2772}
2773
2774#else /* IEM_WITH_SETJMP */
2775
2776/**
2777 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2778 *
2779 * @returns The opcode dword.
2780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2781 */
2782DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2783{
2784# ifdef IEM_WITH_CODE_TLB
2785 uint32_t u32;
2786 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2787 return u32;
2788# else
2789 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2790 if (rcStrict == VINF_SUCCESS)
2791 {
2792 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2793 pVCpu->iem.s.offOpcode = offOpcode + 4;
2794# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2795 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2796# else
2797 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2798 pVCpu->iem.s.abOpcode[offOpcode + 1],
2799 pVCpu->iem.s.abOpcode[offOpcode + 2],
2800 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2801# endif
2802 }
2803 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2804# endif
2805}
2806
2807
2808/**
2809 * Fetches the next opcode dword, longjmp on error.
2810 *
2811 * @returns The opcode dword.
2812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2813 */
2814DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2815{
2816# ifdef IEM_WITH_CODE_TLB
2817 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2818 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2819 if (RT_LIKELY( pbBuf != NULL
2820 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2821 {
2822 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2823# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2824 return *(uint32_t const *)&pbBuf[offBuf];
2825# else
2826 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2827 pbBuf[offBuf + 1],
2828 pbBuf[offBuf + 2],
2829 pbBuf[offBuf + 3]);
2830# endif
2831 }
2832# else
2833 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2834 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2835 {
2836 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2837# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2838 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2839# else
2840 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2841 pVCpu->iem.s.abOpcode[offOpcode + 1],
2842 pVCpu->iem.s.abOpcode[offOpcode + 2],
2843 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2844# endif
2845 }
2846# endif
2847 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2848}
2849
2850#endif /* IEM_WITH_SETJMP */
2851
2852
2853/**
2854 * Fetches the next opcode dword, returns automatically on failure.
2855 *
2856 * @param a_pu32 Where to return the opcode dword.
2857 * @remark Implicitly references pVCpu.
2858 */
2859#ifndef IEM_WITH_SETJMP
2860# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2861 do \
2862 { \
2863 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2864 if (rcStrict2 != VINF_SUCCESS) \
2865 return rcStrict2; \
2866 } while (0)
2867#else
2868# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2869#endif
2870
2871#ifndef IEM_WITH_SETJMP
2872
2873/**
2874 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2875 *
2876 * @returns Strict VBox status code.
2877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2878 * @param pu64 Where to return the opcode qword.
2879 */
2880DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2881{
2882 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2883 if (rcStrict == VINF_SUCCESS)
2884 {
2885 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2886 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2887 pVCpu->iem.s.abOpcode[offOpcode + 1],
2888 pVCpu->iem.s.abOpcode[offOpcode + 2],
2889 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2890 pVCpu->iem.s.offOpcode = offOpcode + 4;
2891 }
2892 else
2893 *pu64 = 0;
2894 return rcStrict;
2895}
2896
2897
2898/**
2899 * Fetches the next opcode dword, zero extending it to a quad word.
2900 *
2901 * @returns Strict VBox status code.
2902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2903 * @param pu64 Where to return the opcode quad word.
2904 */
2905DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2906{
2907 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2908 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2909 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2910
2911 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2912 pVCpu->iem.s.abOpcode[offOpcode + 1],
2913 pVCpu->iem.s.abOpcode[offOpcode + 2],
2914 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2915 pVCpu->iem.s.offOpcode = offOpcode + 4;
2916 return VINF_SUCCESS;
2917}
2918
2919#endif /* !IEM_WITH_SETJMP */
2920
2921
2922/**
2923 * Fetches the next opcode dword and zero extends it to a quad word, returns
2924 * automatically on failure.
2925 *
2926 * @param a_pu64 Where to return the opcode quad word.
2927 * @remark Implicitly references pVCpu.
2928 */
2929#ifndef IEM_WITH_SETJMP
2930# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2931 do \
2932 { \
2933 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2934 if (rcStrict2 != VINF_SUCCESS) \
2935 return rcStrict2; \
2936 } while (0)
2937#else
2938# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2939#endif
2940
2941
2942#ifndef IEM_WITH_SETJMP
2943/**
2944 * Fetches the next signed double word from the opcode stream.
2945 *
2946 * @returns Strict VBox status code.
2947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2948 * @param pi32 Where to return the signed double word.
2949 */
2950DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2951{
2952 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2953}
2954#endif
2955
2956/**
2957 * Fetches the next signed double word from the opcode stream, returning
2958 * automatically on failure.
2959 *
2960 * @param a_pi32 Where to return the signed double word.
2961 * @remark Implicitly references pVCpu.
2962 */
2963#ifndef IEM_WITH_SETJMP
2964# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2965 do \
2966 { \
2967 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2968 if (rcStrict2 != VINF_SUCCESS) \
2969 return rcStrict2; \
2970 } while (0)
2971#else
2972# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2973#endif
2974
2975#ifndef IEM_WITH_SETJMP
2976
2977/**
2978 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2979 *
2980 * @returns Strict VBox status code.
2981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2982 * @param pu64 Where to return the opcode qword.
2983 */
2984DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2985{
2986 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2987 if (rcStrict == VINF_SUCCESS)
2988 {
2989 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2990 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2991 pVCpu->iem.s.abOpcode[offOpcode + 1],
2992 pVCpu->iem.s.abOpcode[offOpcode + 2],
2993 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2994 pVCpu->iem.s.offOpcode = offOpcode + 4;
2995 }
2996 else
2997 *pu64 = 0;
2998 return rcStrict;
2999}
3000
3001
3002/**
3003 * Fetches the next opcode dword, sign extending it into a quad word.
3004 *
3005 * @returns Strict VBox status code.
3006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3007 * @param pu64 Where to return the opcode quad word.
3008 */
3009DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3010{
3011 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3012 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3013 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3014
3015 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3016 pVCpu->iem.s.abOpcode[offOpcode + 1],
3017 pVCpu->iem.s.abOpcode[offOpcode + 2],
3018 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3019 *pu64 = i32;
3020 pVCpu->iem.s.offOpcode = offOpcode + 4;
3021 return VINF_SUCCESS;
3022}
3023
3024#endif /* !IEM_WITH_SETJMP */
3025
3026
3027/**
3028 * Fetches the next opcode double word and sign extends it to a quad word,
3029 * returns automatically on failure.
3030 *
3031 * @param a_pu64 Where to return the opcode quad word.
3032 * @remark Implicitly references pVCpu.
3033 */
3034#ifndef IEM_WITH_SETJMP
3035# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3036 do \
3037 { \
3038 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3039 if (rcStrict2 != VINF_SUCCESS) \
3040 return rcStrict2; \
3041 } while (0)
3042#else
3043# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3044#endif
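
/*
 * Illustrative sketch (not part of the build): S32_SX_U64 is what 64-bit code
 * uses for 32-bit displacements and immediates.  Adding the sign-extended
 * value to a 64-bit base wraps modulo 2^64, so negative displacements work as
 * expected; the numbers are example values only.
 */
#if 0
static uint64_t exampleApplyDisp32(uint64_t uRipNext, uint32_t u32Disp)
{
    uint64_t const uDispSx = (int32_t)u32Disp;  /* e.g. 0xfffffff0 -> 0xfffffffffffffff0 (-16) */
    return uRipNext + uDispSx;                  /* i.e. uRipNext - 16 for the example value */
}
#endif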
3045
3046#ifndef IEM_WITH_SETJMP
3047
3048/**
3049 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3050 *
3051 * @returns Strict VBox status code.
3052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3053 * @param pu64 Where to return the opcode qword.
3054 */
3055DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3056{
3057 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3058 if (rcStrict == VINF_SUCCESS)
3059 {
3060 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3061# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3062 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3063# else
3064 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3065 pVCpu->iem.s.abOpcode[offOpcode + 1],
3066 pVCpu->iem.s.abOpcode[offOpcode + 2],
3067 pVCpu->iem.s.abOpcode[offOpcode + 3],
3068 pVCpu->iem.s.abOpcode[offOpcode + 4],
3069 pVCpu->iem.s.abOpcode[offOpcode + 5],
3070 pVCpu->iem.s.abOpcode[offOpcode + 6],
3071 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3072# endif
3073 pVCpu->iem.s.offOpcode = offOpcode + 8;
3074 }
3075 else
3076 *pu64 = 0;
3077 return rcStrict;
3078}
3079
3080
3081/**
3082 * Fetches the next opcode qword.
3083 *
3084 * @returns Strict VBox status code.
3085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3086 * @param pu64 Where to return the opcode qword.
3087 */
3088DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3089{
3090 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3091 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3092 {
3093# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3094 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3095# else
3096 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3097 pVCpu->iem.s.abOpcode[offOpcode + 1],
3098 pVCpu->iem.s.abOpcode[offOpcode + 2],
3099 pVCpu->iem.s.abOpcode[offOpcode + 3],
3100 pVCpu->iem.s.abOpcode[offOpcode + 4],
3101 pVCpu->iem.s.abOpcode[offOpcode + 5],
3102 pVCpu->iem.s.abOpcode[offOpcode + 6],
3103 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3104# endif
3105 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3106 return VINF_SUCCESS;
3107 }
3108 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3109}
3110
3111#else /* IEM_WITH_SETJMP */
3112
3113/**
3114 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3115 *
3116 * @returns The opcode qword.
3117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3118 */
3119DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3120{
3121# ifdef IEM_WITH_CODE_TLB
3122 uint64_t u64;
3123 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3124 return u64;
3125# else
3126 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3127 if (rcStrict == VINF_SUCCESS)
3128 {
3129 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3130 pVCpu->iem.s.offOpcode = offOpcode + 8;
3131# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3132 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3133# else
3134 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3135 pVCpu->iem.s.abOpcode[offOpcode + 1],
3136 pVCpu->iem.s.abOpcode[offOpcode + 2],
3137 pVCpu->iem.s.abOpcode[offOpcode + 3],
3138 pVCpu->iem.s.abOpcode[offOpcode + 4],
3139 pVCpu->iem.s.abOpcode[offOpcode + 5],
3140 pVCpu->iem.s.abOpcode[offOpcode + 6],
3141 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3142# endif
3143 }
3144 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3145# endif
3146}
3147
3148
3149/**
3150 * Fetches the next opcode qword, longjmp on error.
3151 *
3152 * @returns The opcode qword.
3153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3154 */
3155DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3156{
3157# ifdef IEM_WITH_CODE_TLB
3158 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3159 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3160 if (RT_LIKELY( pbBuf != NULL
3161 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3162 {
3163 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3164# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3165 return *(uint64_t const *)&pbBuf[offBuf];
3166# else
3167 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3168 pbBuf[offBuf + 1],
3169 pbBuf[offBuf + 2],
3170 pbBuf[offBuf + 3],
3171 pbBuf[offBuf + 4],
3172 pbBuf[offBuf + 5],
3173 pbBuf[offBuf + 6],
3174 pbBuf[offBuf + 7]);
3175# endif
3176 }
3177# else
3178 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3179 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3180 {
3181 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3182# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3183 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3184# else
3185 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3186 pVCpu->iem.s.abOpcode[offOpcode + 1],
3187 pVCpu->iem.s.abOpcode[offOpcode + 2],
3188 pVCpu->iem.s.abOpcode[offOpcode + 3],
3189 pVCpu->iem.s.abOpcode[offOpcode + 4],
3190 pVCpu->iem.s.abOpcode[offOpcode + 5],
3191 pVCpu->iem.s.abOpcode[offOpcode + 6],
3192 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3193# endif
3194 }
3195# endif
3196 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3197}
3198
3199#endif /* IEM_WITH_SETJMP */
3200
3201/**
3202 * Fetches the next opcode quad word, returns automatically on failure.
3203 *
3204 * @param a_pu64 Where to return the opcode quad word.
3205 * @remark Implicitly references pVCpu.
3206 */
3207#ifndef IEM_WITH_SETJMP
3208# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3209 do \
3210 { \
3211 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3212 if (rcStrict2 != VINF_SUCCESS) \
3213 return rcStrict2; \
3214 } while (0)
3215#else
3216# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3217#endif
3218
3219
3220/** @name Misc Worker Functions.
3221 * @{
3222 */
3223
3224/**
3225 * Gets the exception class for the specified exception vector.
3226 *
3227 * @returns The class of the specified exception.
3228 * @param uVector The exception vector.
3229 */
3230IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3231{
3232 Assert(uVector <= X86_XCPT_LAST);
3233 switch (uVector)
3234 {
3235 case X86_XCPT_DE:
3236 case X86_XCPT_TS:
3237 case X86_XCPT_NP:
3238 case X86_XCPT_SS:
3239 case X86_XCPT_GP:
3240 case X86_XCPT_SX: /* AMD only */
3241 return IEMXCPTCLASS_CONTRIBUTORY;
3242
3243 case X86_XCPT_PF:
3244 case X86_XCPT_VE: /* Intel only */
3245 return IEMXCPTCLASS_PAGE_FAULT;
3246 }
3247 return IEMXCPTCLASS_BENIGN;
3248}
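/* For reference, the classification performed above boils down to:
 *      #DE, #TS, #NP, #SS, #GP, #SX (AMD)      -> IEMXCPTCLASS_CONTRIBUTORY
 *      #PF, #VE (Intel)                        -> IEMXCPTCLASS_PAGE_FAULT
 *      everything else (#DB, #BP, #UD, ...)    -> IEMXCPTCLASS_BENIGN
 */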
3249
3250
3251/**
3252 * Evaluates how to handle an exception caused during delivery of another event
3253 * (exception / interrupt).
3254 *
3255 * @returns How to handle the recursive exception.
3256 * @param pVCpu The cross context virtual CPU structure of the
3257 * calling thread.
3258 * @param fPrevFlags The flags of the previous event.
3259 * @param uPrevVector The vector of the previous event.
3260 * @param fCurFlags The flags of the current exception.
3261 * @param uCurVector The vector of the current exception.
3262 * @param pfXcptRaiseInfo Where to store additional information about the
3263 * exception condition. Optional.
3264 */
3265VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3266 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3267{
3268 /*
3269 * Only CPU exceptions can be raised while delivering other events; exceptions generated by
3270 * software interrupts (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
3271 */
3272 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3273 Assert(pVCpu); RT_NOREF(pVCpu);
3274
3275 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3276 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3277 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3278 {
3279 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3280 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3281 {
3282 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3283 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3284 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3285 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3286 {
3287 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3288 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3289 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3290 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3291 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3292 }
3293 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3294 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3295 {
3296 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3297 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%u uCurVector=%u -> #DF\n", uPrevVector, uCurVector));
3298 }
3299 else if ( uPrevVector == X86_XCPT_DF
3300 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3301 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3302 {
3303 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3304 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3305 }
3306 }
3307 else
3308 {
3309 if (uPrevVector == X86_XCPT_NMI)
3310 {
3311 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3312 if (uCurVector == X86_XCPT_PF)
3313 {
3314 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3315 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3316 }
3317 }
3318 else if ( uPrevVector == X86_XCPT_AC
3319 && uCurVector == X86_XCPT_AC)
3320 {
3321 enmRaise = IEMXCPTRAISE_CPU_HANG;
3322 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3323 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3324 }
3325 }
3326 }
3327 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3328 {
3329 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3330 if (uCurVector == X86_XCPT_PF)
3331 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3332 }
3333 else
3334 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3335
3336 if (pfXcptRaiseInfo)
3337 *pfXcptRaiseInfo = fRaiseInfo;
3338 return enmRaise;
3339}
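/* Illustrative example (hypothetical caller, outcome per the logic above): a #GP raised
 * while delivering a #PF is a page-fault -> contributory combination and escalates to a
 * double fault:
 *      IEMXCPTRAISEINFO fInfo;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, &fInfo);
 *      Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT && fInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
 */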
3340
3341
3342/**
3343 * Enters the CPU shutdown state initiated by a triple fault or other
3344 * unrecoverable conditions.
3345 *
3346 * @returns Strict VBox status code.
3347 * @param pVCpu The cross context virtual CPU structure of the
3348 * calling thread.
3349 */
3350IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3351{
3352 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3353 {
3354 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3355 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3356 }
3357
3358 RT_NOREF(pVCpu);
3359 return VINF_EM_TRIPLE_FAULT;
3360}
3361
3362
3363#ifdef VBOX_WITH_NESTED_HWVIRT
3364IEM_STATIC VBOXSTRICTRC iemHandleSvmNstGstEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
3365 uint32_t uErr, uint64_t uCr2)
3366{
3367 Assert(IEM_IS_SVM_ENABLED(pVCpu));
3368
3369 /*
3370 * Handle nested-guest SVM exception and software interrupt intercepts,
3371 * see AMD spec. 15.12 "Exception Intercepts".
3372 *
3373 * - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
3374 * - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
3375 * even when they use a vector in the range 0 to 31.
3376 * - ICEBP should not trigger #DB intercept, but its own intercept.
3377 * - For #PF exceptions, the intercept is checked before CR2 is written by the exception.
3378 */
3379 /* Check NMI intercept */
3380 if ( u8Vector == X86_XCPT_NMI
3381 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3382 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
3383 {
3384 Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
3385 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3386 }
3387
3388 /* Check ICEBP intercept. */
3389 if ( (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
3390 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_ICEBP))
3391 {
3392 Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
3393 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3394 }
3395
3396 /* Check CPU exception intercepts. */
3397 if ( (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3398 && IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector))
3399 {
3400 Assert(u8Vector <= X86_XCPT_LAST);
3401 uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
3402 uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
3403 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist
3404 && u8Vector == X86_XCPT_PF
3405 && !(uErr & X86_TRAP_PF_ID))
3406 {
3407 /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
3408#ifdef IEM_WITH_CODE_TLB
3409#else
3410 uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
3411 uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
3412 if ( cbCurrent > 0
3413 && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr))
3414 {
3415 Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
3416 memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
3417 }
3418#endif
3419 }
3420 Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept. u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64 -> #VMEXIT\n",
3421 u8Vector, uExitInfo1, uExitInfo2));
3422 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
3423 }
3424
3425 /* Check software interrupt (INTn) intercepts. */
3426 if ( (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3427 | IEM_XCPT_FLAGS_BP_INSTR
3428 | IEM_XCPT_FLAGS_ICEBP_INSTR
3429 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3430 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN))
3431 {
3432 uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0;
3433 Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
3434 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
3435 }
3436
3437 return VINF_HM_INTERCEPT_NOT_ACTIVE;
3438}
3439#endif
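/* Illustrative example (hypothetical values, outcome per the code above): a #PF (vector 14)
 * with error code 0x2 and CR2=0x1000 that hits an exception intercept in the nested-guest
 * VMCB yields SVM_EXIT_EXCEPTION_0 + 14 with uExitInfo1=0x2 and uExitInfo2=0x1000, given
 * that IEM_XCPT_FLAGS_ERR and IEM_XCPT_FLAGS_CR2 are both set in fFlags. */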
3440
3441/**
3442 * Validates a new SS segment.
3443 *
3444 * @returns VBox strict status code.
3445 * @param pVCpu The cross context virtual CPU structure of the
3446 * calling thread.
3447 * @param pCtx The CPU context.
3448 * @param NewSS The new SS selector.
3449 * @param uCpl The CPL to load the stack for.
3450 * @param pDesc Where to return the descriptor.
3451 */
3452IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3453{
3454 NOREF(pCtx);
3455
3456 /* Null selectors are not allowed (we're not called for dispatching
3457 interrupts with SS=0 in long mode). */
3458 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3459 {
3460 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3461 return iemRaiseTaskSwitchFault0(pVCpu);
3462 }
3463
3464 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3465 if ((NewSS & X86_SEL_RPL) != uCpl)
3466 {
3467 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3468 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3469 }
3470
3471 /*
3472 * Read the descriptor.
3473 */
3474 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3475 if (rcStrict != VINF_SUCCESS)
3476 return rcStrict;
3477
3478 /*
3479 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3480 */
3481 if (!pDesc->Legacy.Gen.u1DescType)
3482 {
3483 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3484 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3485 }
3486
3487 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3488 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3489 {
3490 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3491 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3492 }
3493 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3494 {
3495 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3496 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3497 }
3498
3499 /* Is it there? */
3500 /** @todo testcase: Is this checked before the canonical / limit check below? */
3501 if (!pDesc->Legacy.Gen.u1Present)
3502 {
3503 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3504 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3505 }
3506
3507 return VINF_SUCCESS;
3508}
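/* Illustrative example (hypothetical values): with uCpl=0, a NewSS of 0x0013 (index 2, TI=0,
 * RPL=3) fails the RPL == CPL check above and takes the iemRaiseTaskSwitchFaultBySelector()
 * path before the descriptor is ever read. */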
3509
3510
3511/**
3512 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3513 * not.
3514 *
3515 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3516 * @param a_pCtx The CPU context.
3517 */
3518#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3519# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3520 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3521 ? (a_pCtx)->eflags.u \
3522 : CPUMRawGetEFlags(a_pVCpu) )
3523#else
3524# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3525 ( (a_pCtx)->eflags.u )
3526#endif
3527
3528/**
3529 * Updates the EFLAGS in the correct manner wrt. PATM.
3530 *
3531 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3532 * @param a_pCtx The CPU context.
3533 * @param a_fEfl The new EFLAGS.
3534 */
3535#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3536# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3537 do { \
3538 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3539 (a_pCtx)->eflags.u = (a_fEfl); \
3540 else \
3541 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3542 } while (0)
3543#else
3544# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3545 do { \
3546 (a_pCtx)->eflags.u = (a_fEfl); \
3547 } while (0)
3548#endif
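/* Illustrative read-modify-write sketch using the two macros above (this mirrors what the
 * real-mode interrupt dispatch code further down does when clearing IF):
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;
 *      IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 */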
3549
3550
3551/** @} */
3552
3553/** @name Raising Exceptions.
3554 *
3555 * @{
3556 */
3557
3558
3559/**
3560 * Loads the specified stack far pointer from the TSS.
3561 *
3562 * @returns VBox strict status code.
3563 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3564 * @param pCtx The CPU context.
3565 * @param uCpl The CPL to load the stack for.
3566 * @param pSelSS Where to return the new stack segment.
3567 * @param puEsp Where to return the new stack pointer.
3568 */
3569IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3570 PRTSEL pSelSS, uint32_t *puEsp)
3571{
3572 VBOXSTRICTRC rcStrict;
3573 Assert(uCpl < 4);
3574
3575 switch (pCtx->tr.Attr.n.u4Type)
3576 {
3577 /*
3578 * 16-bit TSS (X86TSS16).
3579 */
3580 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); /* fall thru */
3581 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3582 {
3583 uint32_t off = uCpl * 4 + 2;
3584 if (off + 4 <= pCtx->tr.u32Limit)
3585 {
3586 /** @todo check actual access pattern here. */
3587 uint32_t u32Tmp = 0; /* gcc maybe... */
3588 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3589 if (rcStrict == VINF_SUCCESS)
3590 {
3591 *puEsp = RT_LOWORD(u32Tmp);
3592 *pSelSS = RT_HIWORD(u32Tmp);
3593 return VINF_SUCCESS;
3594 }
3595 }
3596 else
3597 {
3598 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3599 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3600 }
3601 break;
3602 }
3603
3604 /*
3605 * 32-bit TSS (X86TSS32).
3606 */
3607 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); /* fall thru */
3608 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3609 {
3610 uint32_t off = uCpl * 8 + 4;
3611 if (off + 7 <= pCtx->tr.u32Limit)
3612 {
3613/** @todo check actual access pattern here. */
3614 uint64_t u64Tmp;
3615 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3616 if (rcStrict == VINF_SUCCESS)
3617 {
3618 *puEsp = u64Tmp & UINT32_MAX;
3619 *pSelSS = (RTSEL)(u64Tmp >> 32);
3620 return VINF_SUCCESS;
3621 }
3622 }
3623 else
3624 {
3625 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3626 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3627 }
3628 break;
3629 }
3630
3631 default:
3632 AssertFailed();
3633 rcStrict = VERR_IEM_IPE_4;
3634 break;
3635 }
3636
3637 *puEsp = 0; /* make gcc happy */
3638 *pSelSS = 0; /* make gcc happy */
3639 return rcStrict;
3640}
3641
3642
3643/**
3644 * Loads the specified stack pointer from the 64-bit TSS.
3645 *
3646 * @returns VBox strict status code.
3647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3648 * @param pCtx The CPU context.
3649 * @param uCpl The CPL to load the stack for.
3650 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3651 * @param puRsp Where to return the new stack pointer.
3652 */
3653IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3654{
3655 Assert(uCpl < 4);
3656 Assert(uIst < 8);
3657 *puRsp = 0; /* make gcc happy */
3658
3659 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3660
3661 uint32_t off;
3662 if (uIst)
3663 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3664 else
3665 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3666 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3667 {
3668 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3669 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3670 }
3671
3672 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3673}
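/* Illustrative example (offsets per X86TSS64): with uIst=0 and uCpl=2 the code above reads
 * the 8-byte rsp2 field at TR.base + RT_OFFSETOF(X86TSS64, rsp0) + 2*8, while uIst=3 reads
 * ist3 at TR.base + RT_OFFSETOF(X86TSS64, ist1) + 2*8; both are subject to the TR limit
 * check before the fetch. */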
3674
3675
3676/**
3677 * Adjust the CPU state according to the exception being raised.
3678 *
3679 * @param pCtx The CPU context.
3680 * @param u8Vector The exception that has been raised.
3681 */
3682DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3683{
3684 switch (u8Vector)
3685 {
3686 case X86_XCPT_DB:
3687 pCtx->dr[7] &= ~X86_DR7_GD;
3688 break;
3689 /** @todo Read the AMD and Intel exception reference... */
3690 }
3691}
3692
3693
3694/**
3695 * Implements exceptions and interrupts for real mode.
3696 *
3697 * @returns VBox strict status code.
3698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3699 * @param pCtx The CPU context.
3700 * @param cbInstr The number of bytes to offset rIP by in the return
3701 * address.
3702 * @param u8Vector The interrupt / exception vector number.
3703 * @param fFlags The flags.
3704 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3705 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3706 */
3707IEM_STATIC VBOXSTRICTRC
3708iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3709 PCPUMCTX pCtx,
3710 uint8_t cbInstr,
3711 uint8_t u8Vector,
3712 uint32_t fFlags,
3713 uint16_t uErr,
3714 uint64_t uCr2)
3715{
3716 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3717 NOREF(uErr); NOREF(uCr2);
3718
3719 /*
3720 * Read the IDT entry.
3721 */
3722 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3723 {
3724 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3725 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3726 }
3727 RTFAR16 Idte;
3728 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3729 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3730 return rcStrict;
3731
3732 /*
3733 * Push the stack frame.
3734 */
3735 uint16_t *pu16Frame;
3736 uint64_t uNewRsp;
3737 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3738 if (rcStrict != VINF_SUCCESS)
3739 return rcStrict;
3740
3741 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3742#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3743 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3744 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3745 fEfl |= UINT16_C(0xf000);
3746#endif
3747 pu16Frame[2] = (uint16_t)fEfl;
3748 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3749 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3750 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3751 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3752 return rcStrict;
3753
3754 /*
3755 * Load the vector address into cs:ip and make exception specific state
3756 * adjustments.
3757 */
3758 pCtx->cs.Sel = Idte.sel;
3759 pCtx->cs.ValidSel = Idte.sel;
3760 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3761 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3762 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3763 pCtx->rip = Idte.off;
3764 fEfl &= ~X86_EFL_IF;
3765 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3766
3767 /** @todo do we actually do this in real mode? */
3768 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3769 iemRaiseXcptAdjustState(pCtx, u8Vector);
3770
3771 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3772}
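/* Illustrative example (hypothetical IVT contents): for vector 8 the code above reads the
 * 4-byte IVT entry at IDTR.base + 8*4; the low word is the new IP and the high word the new
 * CS, so an entry with offset word 0x5678 and selector word 0x1234 dispatches to
 * CS:IP = 1234:5678. The EFLAGS value pushed on the stack is the one prior to clearing IF. */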
3773
3774
3775/**
3776 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3777 *
3778 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3779 * @param pSReg Pointer to the segment register.
3780 */
3781IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3782{
3783 pSReg->Sel = 0;
3784 pSReg->ValidSel = 0;
3785 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3786 {
3787 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3788 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3789 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3790 }
3791 else
3792 {
3793 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3794 /** @todo check this on AMD-V */
3795 pSReg->u64Base = 0;
3796 pSReg->u32Limit = 0;
3797 }
3798}
3799
3800
3801/**
3802 * Loads a segment selector during a task switch in V8086 mode.
3803 *
3804 * @param pSReg Pointer to the segment register.
3805 * @param uSel The selector value to load.
3806 */
3807IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3808{
3809 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3810 pSReg->Sel = uSel;
3811 pSReg->ValidSel = uSel;
3812 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3813 pSReg->u64Base = uSel << 4;
3814 pSReg->u32Limit = 0xffff;
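    /* 0xf3 = present, DPL=3, S=1 (code/data), type 3 = read/write data segment, accessed. */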
3815 pSReg->Attr.u = 0xf3;
3816}
3817
3818
3819/**
3820 * Loads a NULL data selector into a selector register, both the hidden and
3821 * visible parts, in protected mode.
3822 *
3823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3824 * @param pSReg Pointer to the segment register.
3825 * @param uRpl The RPL.
3826 */
3827IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3828{
3829 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3830 * data selector in protected mode. */
3831 pSReg->Sel = uRpl;
3832 pSReg->ValidSel = uRpl;
3833 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3834 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3835 {
3836 /* VT-x (Intel 3960x) observed doing something like this. */
3837 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3838 pSReg->u32Limit = UINT32_MAX;
3839 pSReg->u64Base = 0;
3840 }
3841 else
3842 {
3843 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3844 pSReg->u32Limit = 0;
3845 pSReg->u64Base = 0;
3846 }
3847}
3848
3849
3850/**
3851 * Loads a segment selector during a task switch in protected mode.
3852 *
3853 * In this task switch scenario, we would throw \#TS exceptions rather than
3854 * \#GPs.
3855 *
3856 * @returns VBox strict status code.
3857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3858 * @param pSReg Pointer to the segment register.
3859 * @param uSel The new selector value.
3860 *
3861 * @remarks This does _not_ handle CS or SS.
3862 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3863 */
3864IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3865{
3866 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3867
3868 /* Null data selector. */
3869 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3870 {
3871 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3872 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3873 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3874 return VINF_SUCCESS;
3875 }
3876
3877 /* Fetch the descriptor. */
3878 IEMSELDESC Desc;
3879 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3880 if (rcStrict != VINF_SUCCESS)
3881 {
3882 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3883 VBOXSTRICTRC_VAL(rcStrict)));
3884 return rcStrict;
3885 }
3886
3887 /* Must be a data segment or readable code segment. */
3888 if ( !Desc.Legacy.Gen.u1DescType
3889 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3890 {
3891 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3892 Desc.Legacy.Gen.u4Type));
3893 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3894 }
3895
3896 /* Check privileges for data segments and non-conforming code segments. */
3897 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3898 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3899 {
3900 /* The RPL and the new CPL must be less than or equal to the DPL. */
3901 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3902 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3903 {
3904 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3905 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3906 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3907 }
3908 }
3909
3910 /* Is it there? */
3911 if (!Desc.Legacy.Gen.u1Present)
3912 {
3913 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3914 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3915 }
3916
3917 /* The base and limit. */
3918 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3919 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3920
3921 /*
3922 * Ok, everything checked out fine. Now set the accessed bit before
3923 * committing the result into the registers.
3924 */
3925 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3926 {
3927 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3928 if (rcStrict != VINF_SUCCESS)
3929 return rcStrict;
3930 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3931 }
3932
3933 /* Commit */
3934 pSReg->Sel = uSel;
3935 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3936 pSReg->u32Limit = cbLimit;
3937 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3938 pSReg->ValidSel = uSel;
3939 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3940 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3941 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3942
3943 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3944 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3945 return VINF_SUCCESS;
3946}
3947
3948
3949/**
3950 * Performs a task switch.
3951 *
3952 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3953 * caller is responsible for performing the necessary checks (like DPL, TSS
3954 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3955 * reference for JMP, CALL, IRET.
3956 *
3957 * If the task switch is due to a software interrupt or hardware exception,
3958 * the caller is responsible for validating the TSS selector and descriptor. See
3959 * Intel Instruction reference for INT n.
3960 *
3961 * @returns VBox strict status code.
3962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3963 * @param pCtx The CPU context.
3964 * @param enmTaskSwitch What caused this task switch.
3965 * @param uNextEip The EIP effective after the task switch.
3966 * @param fFlags The flags.
3967 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3968 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3969 * @param SelTSS The TSS selector of the new task.
3970 * @param pNewDescTSS Pointer to the new TSS descriptor.
3971 */
3972IEM_STATIC VBOXSTRICTRC
3973iemTaskSwitch(PVMCPU pVCpu,
3974 PCPUMCTX pCtx,
3975 IEMTASKSWITCH enmTaskSwitch,
3976 uint32_t uNextEip,
3977 uint32_t fFlags,
3978 uint16_t uErr,
3979 uint64_t uCr2,
3980 RTSEL SelTSS,
3981 PIEMSELDESC pNewDescTSS)
3982{
3983 Assert(!IEM_IS_REAL_MODE(pVCpu));
3984 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3985
3986 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3987 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3988 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3989 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3990 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3991
3992 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3993 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3994
3995 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3996 fIsNewTSS386, pCtx->eip, uNextEip));
3997
3998 /* Update CR2 in case it's a page-fault. */
3999 /** @todo This should probably be done much earlier in IEM/PGM. See
4000 * @bugref{5653#c49}. */
4001 if (fFlags & IEM_XCPT_FLAGS_CR2)
4002 pCtx->cr2 = uCr2;
4003
4004 /*
4005 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4006 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4007 */
4008 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4009 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4010 if (uNewTSSLimit < uNewTSSLimitMin)
4011 {
4012 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4013 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4014 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4015 }
4016
4017 /*
4018 * Check the current TSS limit. The last written byte to the current TSS during the
4019 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4020 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4021 *
4022 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4023 * end up with smaller than "legal" TSS limits.
4024 */
4025 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
4026 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4027 if (uCurTSSLimit < uCurTSSLimitMin)
4028 {
4029 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4030 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4031 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4032 }
4033
4034 /*
4035 * Verify that the new TSS can be accessed and map it. Map only the required contents
4036 * and not the entire TSS.
4037 */
4038 void *pvNewTSS;
4039 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4040 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4041 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4042 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4043 * not perform correct translation if this happens. See Intel spec. 7.2.1
4044 * "Task-State Segment" */
4045 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4046 if (rcStrict != VINF_SUCCESS)
4047 {
4048 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4049 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4050 return rcStrict;
4051 }
4052
4053 /*
4054 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4055 */
4056 uint32_t u32EFlags = pCtx->eflags.u32;
4057 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4058 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4059 {
4060 PX86DESC pDescCurTSS;
4061 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4062 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4063 if (rcStrict != VINF_SUCCESS)
4064 {
4065 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4066 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4067 return rcStrict;
4068 }
4069
4070 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4071 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4072 if (rcStrict != VINF_SUCCESS)
4073 {
4074 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4075 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4076 return rcStrict;
4077 }
4078
4079 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4080 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4081 {
4082 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4083 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4084 u32EFlags &= ~X86_EFL_NT;
4085 }
4086 }
4087
4088 /*
4089 * Save the CPU state into the current TSS.
4090 */
4091 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4092 if (GCPtrNewTSS == GCPtrCurTSS)
4093 {
4094 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4095 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4096 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4097 }
4098 if (fIsNewTSS386)
4099 {
4100 /*
4101 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4102 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4103 */
4104 void *pvCurTSS32;
4105 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4106 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4107 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4108 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4109 if (rcStrict != VINF_SUCCESS)
4110 {
4111 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4112 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4113 return rcStrict;
4114 }
4115
4116 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4117 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4118 pCurTSS32->eip = uNextEip;
4119 pCurTSS32->eflags = u32EFlags;
4120 pCurTSS32->eax = pCtx->eax;
4121 pCurTSS32->ecx = pCtx->ecx;
4122 pCurTSS32->edx = pCtx->edx;
4123 pCurTSS32->ebx = pCtx->ebx;
4124 pCurTSS32->esp = pCtx->esp;
4125 pCurTSS32->ebp = pCtx->ebp;
4126 pCurTSS32->esi = pCtx->esi;
4127 pCurTSS32->edi = pCtx->edi;
4128 pCurTSS32->es = pCtx->es.Sel;
4129 pCurTSS32->cs = pCtx->cs.Sel;
4130 pCurTSS32->ss = pCtx->ss.Sel;
4131 pCurTSS32->ds = pCtx->ds.Sel;
4132 pCurTSS32->fs = pCtx->fs.Sel;
4133 pCurTSS32->gs = pCtx->gs.Sel;
4134
4135 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4136 if (rcStrict != VINF_SUCCESS)
4137 {
4138 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4139 VBOXSTRICTRC_VAL(rcStrict)));
4140 return rcStrict;
4141 }
4142 }
4143 else
4144 {
4145 /*
4146 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4147 */
4148 void *pvCurTSS16;
4149 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4150 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4151 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4152 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4153 if (rcStrict != VINF_SUCCESS)
4154 {
4155 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4156 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4157 return rcStrict;
4158 }
4159
4160 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4161 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4162 pCurTSS16->ip = uNextEip;
4163 pCurTSS16->flags = u32EFlags;
4164 pCurTSS16->ax = pCtx->ax;
4165 pCurTSS16->cx = pCtx->cx;
4166 pCurTSS16->dx = pCtx->dx;
4167 pCurTSS16->bx = pCtx->bx;
4168 pCurTSS16->sp = pCtx->sp;
4169 pCurTSS16->bp = pCtx->bp;
4170 pCurTSS16->si = pCtx->si;
4171 pCurTSS16->di = pCtx->di;
4172 pCurTSS16->es = pCtx->es.Sel;
4173 pCurTSS16->cs = pCtx->cs.Sel;
4174 pCurTSS16->ss = pCtx->ss.Sel;
4175 pCurTSS16->ds = pCtx->ds.Sel;
4176
4177 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4178 if (rcStrict != VINF_SUCCESS)
4179 {
4180 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4181 VBOXSTRICTRC_VAL(rcStrict)));
4182 return rcStrict;
4183 }
4184 }
4185
4186 /*
4187 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4188 */
4189 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4190 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4191 {
4192 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4193 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4194 pNewTSS->selPrev = pCtx->tr.Sel;
4195 }
4196
4197 /*
4198 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4199 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4200 */
4201 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4202 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4203 bool fNewDebugTrap;
4204 if (fIsNewTSS386)
4205 {
4206 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4207 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4208 uNewEip = pNewTSS32->eip;
4209 uNewEflags = pNewTSS32->eflags;
4210 uNewEax = pNewTSS32->eax;
4211 uNewEcx = pNewTSS32->ecx;
4212 uNewEdx = pNewTSS32->edx;
4213 uNewEbx = pNewTSS32->ebx;
4214 uNewEsp = pNewTSS32->esp;
4215 uNewEbp = pNewTSS32->ebp;
4216 uNewEsi = pNewTSS32->esi;
4217 uNewEdi = pNewTSS32->edi;
4218 uNewES = pNewTSS32->es;
4219 uNewCS = pNewTSS32->cs;
4220 uNewSS = pNewTSS32->ss;
4221 uNewDS = pNewTSS32->ds;
4222 uNewFS = pNewTSS32->fs;
4223 uNewGS = pNewTSS32->gs;
4224 uNewLdt = pNewTSS32->selLdt;
4225 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4226 }
4227 else
4228 {
4229 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4230 uNewCr3 = 0;
4231 uNewEip = pNewTSS16->ip;
4232 uNewEflags = pNewTSS16->flags;
4233 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4234 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4235 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4236 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4237 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4238 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4239 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4240 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4241 uNewES = pNewTSS16->es;
4242 uNewCS = pNewTSS16->cs;
4243 uNewSS = pNewTSS16->ss;
4244 uNewDS = pNewTSS16->ds;
4245 uNewFS = 0;
4246 uNewGS = 0;
4247 uNewLdt = pNewTSS16->selLdt;
4248 fNewDebugTrap = false;
4249 }
4250
4251 if (GCPtrNewTSS == GCPtrCurTSS)
4252 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4253 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4254
4255 /*
4256 * We're done accessing the new TSS.
4257 */
4258 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4259 if (rcStrict != VINF_SUCCESS)
4260 {
4261 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4262 return rcStrict;
4263 }
4264
4265 /*
4266 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4267 */
4268 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4269 {
4270 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4271 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4272 if (rcStrict != VINF_SUCCESS)
4273 {
4274 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4275 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4276 return rcStrict;
4277 }
4278
4279 /* Check that the descriptor indicates the new TSS is available (not busy). */
4280 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4281 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4282 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4283
4284 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4285 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4286 if (rcStrict != VINF_SUCCESS)
4287 {
4288 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4289 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4290 return rcStrict;
4291 }
4292 }
4293
4294 /*
4295 * From this point on, we're technically in the new task. We will defer exceptions
4296 * until the completion of the task switch but before executing any instructions in the new task.
4297 */
4298 pCtx->tr.Sel = SelTSS;
4299 pCtx->tr.ValidSel = SelTSS;
4300 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4301 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4302 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4303 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4304 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4305
4306 /* Set the busy bit in TR. */
4307 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4308 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4309 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4310 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4311 {
4312 uNewEflags |= X86_EFL_NT;
4313 }
4314
4315 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4316 pCtx->cr0 |= X86_CR0_TS;
4317 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4318
4319 pCtx->eip = uNewEip;
4320 pCtx->eax = uNewEax;
4321 pCtx->ecx = uNewEcx;
4322 pCtx->edx = uNewEdx;
4323 pCtx->ebx = uNewEbx;
4324 pCtx->esp = uNewEsp;
4325 pCtx->ebp = uNewEbp;
4326 pCtx->esi = uNewEsi;
4327 pCtx->edi = uNewEdi;
4328
4329 uNewEflags &= X86_EFL_LIVE_MASK;
4330 uNewEflags |= X86_EFL_RA1_MASK;
4331 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4332
4333 /*
4334 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4335 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4336 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4337 */
4338 pCtx->es.Sel = uNewES;
4339 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4340
4341 pCtx->cs.Sel = uNewCS;
4342 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4343
4344 pCtx->ss.Sel = uNewSS;
4345 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4346
4347 pCtx->ds.Sel = uNewDS;
4348 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4349
4350 pCtx->fs.Sel = uNewFS;
4351 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4352
4353 pCtx->gs.Sel = uNewGS;
4354 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4355 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4356
4357 pCtx->ldtr.Sel = uNewLdt;
4358 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4359 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4360 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4361
4362 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4363 {
4364 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4365 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4366 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4367 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4368 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4369 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4370 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4371 }
4372
4373 /*
4374 * Switch CR3 for the new task.
4375 */
4376 if ( fIsNewTSS386
4377 && (pCtx->cr0 & X86_CR0_PG))
4378 {
4379 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4380 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4381 {
4382 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4383 AssertRCSuccessReturn(rc, rc);
4384 }
4385 else
4386 pCtx->cr3 = uNewCr3;
4387
4388 /* Inform PGM. */
4389 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4390 {
4391 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4392 AssertRCReturn(rc, rc);
4393 /* ignore informational status codes */
4394 }
4395 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4396 }
4397
4398 /*
4399 * Switch LDTR for the new task.
4400 */
4401 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4402 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4403 else
4404 {
4405 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4406
4407 IEMSELDESC DescNewLdt;
4408 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4409 if (rcStrict != VINF_SUCCESS)
4410 {
4411 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4412 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4413 return rcStrict;
4414 }
4415 if ( !DescNewLdt.Legacy.Gen.u1Present
4416 || DescNewLdt.Legacy.Gen.u1DescType
4417 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4418 {
4419 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4420 uNewLdt, DescNewLdt.Legacy.u));
4421 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4422 }
4423
4424 pCtx->ldtr.ValidSel = uNewLdt;
4425 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4426 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4427 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4428 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4429 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4430 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4431 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4432 }
4433
4434 IEMSELDESC DescSS;
4435 if (IEM_IS_V86_MODE(pVCpu))
4436 {
4437 pVCpu->iem.s.uCpl = 3;
4438 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4439 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4440 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4441 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4442 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4443 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4444
4445 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4446 DescSS.Legacy.u = 0;
4447 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4448 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4449 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4450 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4451 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4452 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4453 DescSS.Legacy.Gen.u2Dpl = 3;
4454 }
4455 else
4456 {
4457 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4458
4459 /*
4460 * Load the stack segment for the new task.
4461 */
4462 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4463 {
4464 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4465 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4466 }
4467
4468 /* Fetch the descriptor. */
4469 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4470 if (rcStrict != VINF_SUCCESS)
4471 {
4472 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4473 VBOXSTRICTRC_VAL(rcStrict)));
4474 return rcStrict;
4475 }
4476
4477 /* SS must be a data segment and writable. */
4478 if ( !DescSS.Legacy.Gen.u1DescType
4479 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4480 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4481 {
4482 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4483 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4484 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4485 }
4486
4487 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4488 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4489 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4490 {
4491 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4492 uNewCpl));
4493 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4494 }
4495
4496 /* Is it there? */
4497 if (!DescSS.Legacy.Gen.u1Present)
4498 {
4499 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4500 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4501 }
4502
4503 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4504 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4505
4506 /* Set the accessed bit before committing the result into SS. */
4507 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4508 {
4509 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4510 if (rcStrict != VINF_SUCCESS)
4511 return rcStrict;
4512 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4513 }
4514
4515 /* Commit SS. */
4516 pCtx->ss.Sel = uNewSS;
4517 pCtx->ss.ValidSel = uNewSS;
4518 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4519 pCtx->ss.u32Limit = cbLimit;
4520 pCtx->ss.u64Base = u64Base;
4521 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4522 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4523
4524 /* CPL has changed, update IEM before loading rest of segments. */
4525 pVCpu->iem.s.uCpl = uNewCpl;
4526
4527 /*
4528 * Load the data segments for the new task.
4529 */
4530 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4531 if (rcStrict != VINF_SUCCESS)
4532 return rcStrict;
4533 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4534 if (rcStrict != VINF_SUCCESS)
4535 return rcStrict;
4536 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4537 if (rcStrict != VINF_SUCCESS)
4538 return rcStrict;
4539 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4540 if (rcStrict != VINF_SUCCESS)
4541 return rcStrict;
4542
4543 /*
4544 * Load the code segment for the new task.
4545 */
4546 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4547 {
4548 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4549 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4550 }
4551
4552 /* Fetch the descriptor. */
4553 IEMSELDESC DescCS;
4554 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4555 if (rcStrict != VINF_SUCCESS)
4556 {
4557 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4558 return rcStrict;
4559 }
4560
4561 /* CS must be a code segment. */
4562 if ( !DescCS.Legacy.Gen.u1DescType
4563 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4564 {
4565 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4566 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4567 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4568 }
4569
4570 /* For conforming CS, DPL must be less than or equal to the RPL. */
4571 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4572 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4573 {
4574 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4575 DescCS.Legacy.Gen.u2Dpl));
4576 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4577 }
4578
4579 /* For non-conforming CS, DPL must match RPL. */
4580 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4581 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4582 {
4583 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4584 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4585 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4586 }
4587
4588 /* Is it there? */
4589 if (!DescCS.Legacy.Gen.u1Present)
4590 {
4591 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4592 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4593 }
4594
4595 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4596 u64Base = X86DESC_BASE(&DescCS.Legacy);
4597
4598 /* Set the accessed bit before committing the result into CS. */
4599 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4600 {
4601 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4602 if (rcStrict != VINF_SUCCESS)
4603 return rcStrict;
4604 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4605 }
4606
4607 /* Commit CS. */
4608 pCtx->cs.Sel = uNewCS;
4609 pCtx->cs.ValidSel = uNewCS;
4610 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4611 pCtx->cs.u32Limit = cbLimit;
4612 pCtx->cs.u64Base = u64Base;
4613 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4614 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4615 }
4616
4617 /** @todo Debug trap. */
4618 if (fIsNewTSS386 && fNewDebugTrap)
4619 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4620
4621 /*
4622 * Construct the error code masks based on what caused this task switch.
4623 * See Intel Instruction reference for INT.
4624 */
4625 uint16_t uExt;
4626 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4627 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4628 {
4629 uExt = 1;
4630 }
4631 else
4632 uExt = 0;
4633
4634 /*
4635 * Push any error code on to the new stack.
4636 */
4637 if (fFlags & IEM_XCPT_FLAGS_ERR)
4638 {
4639 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4640 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4641 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4642
4643 /* Check that there is sufficient space on the stack. */
4644 /** @todo Factor out segment limit checking for normal/expand down segments
4645 * into a separate function. */
4646 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4647 {
4648 if ( pCtx->esp - 1 > cbLimitSS
4649 || pCtx->esp < cbStackFrame)
4650 {
4651 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4652 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4653 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4654 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4655 }
4656 }
4657 else
4658 {
4659 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4660 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4661 {
4662 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4663 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4664 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4665 }
4666 }
4667
4668
4669 if (fIsNewTSS386)
4670 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4671 else
4672 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4673 if (rcStrict != VINF_SUCCESS)
4674 {
4675 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4676 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4677 return rcStrict;
4678 }
4679 }
4680
4681 /* Check the new EIP against the new CS limit. */
4682 if (pCtx->eip > pCtx->cs.u32Limit)
4683 {
4684 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4685 pCtx->eip, pCtx->cs.u32Limit));
4686 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4687 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4688 }
4689
4690 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4691 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4692}
4693
4694
4695/**
4696 * Implements exceptions and interrupts for protected mode.
4697 *
4698 * @returns VBox strict status code.
4699 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4700 * @param pCtx The CPU context.
4701 * @param cbInstr The number of bytes to offset rIP by in the return
4702 * address.
4703 * @param u8Vector The interrupt / exception vector number.
4704 * @param fFlags The flags.
4705 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4706 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4707 */
4708IEM_STATIC VBOXSTRICTRC
4709iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4710 PCPUMCTX pCtx,
4711 uint8_t cbInstr,
4712 uint8_t u8Vector,
4713 uint32_t fFlags,
4714 uint16_t uErr,
4715 uint64_t uCr2)
4716{
4717 /*
4718 * Read the IDT entry.
4719 */
4720 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4721 {
4722 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4723 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4724 }
4725 X86DESC Idte;
4726 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4727 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4728 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4729 return rcStrict;
4730 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4731 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4732 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
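    /*
     * A worked example of the IDT-relative error code used above and below,
     * assuming the standard x86 error-code layout (bit 0 = EXT, bit 1 = IDT,
     * selector index starting at bit 3): for u8Vector = 0x0d the value is
     * (0x0d << X86_TRAP_ERR_SEL_SHIFT) | X86_TRAP_ERR_IDT = 0x68 | 0x02 = 0x6a.
     */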
4733
4734 /*
4735 * Check the descriptor type, DPL and such.
4736 * ASSUMES this is done in the same order as described for call-gate calls.
4737 */
4738 if (Idte.Gate.u1DescType)
4739 {
4740 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4741 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4742 }
4743 bool fTaskGate = false;
4744 uint8_t f32BitGate = true;
4745 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4746 switch (Idte.Gate.u4Type)
4747 {
4748 case X86_SEL_TYPE_SYS_UNDEFINED:
4749 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4750 case X86_SEL_TYPE_SYS_LDT:
4751 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4752 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4753 case X86_SEL_TYPE_SYS_UNDEFINED2:
4754 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4755 case X86_SEL_TYPE_SYS_UNDEFINED3:
4756 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4757 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4758 case X86_SEL_TYPE_SYS_UNDEFINED4:
4759 {
4760 /** @todo check what actually happens when the type is wrong...
4761 * esp. call gates. */
4762 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4763 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4764 }
4765
4766 case X86_SEL_TYPE_SYS_286_INT_GATE:
4767 f32BitGate = false;
4768 /* fall thru */
4769 case X86_SEL_TYPE_SYS_386_INT_GATE:
4770 fEflToClear |= X86_EFL_IF;
4771 break;
4772
4773 case X86_SEL_TYPE_SYS_TASK_GATE:
4774 fTaskGate = true;
4775#ifndef IEM_IMPLEMENTS_TASKSWITCH
4776 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4777#endif
4778 break;
4779
4780 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4781 f32BitGate = false; /* fall thru */
4782 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4783 break;
4784
4785 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4786 }
4787
4788 /* Check DPL against CPL if applicable. */
4789 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4790 {
4791 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4792 {
4793 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4794 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4795 }
4796 }
4797
4798 /* Is it there? */
4799 if (!Idte.Gate.u1Present)
4800 {
4801 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4802 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4803 }
4804
4805 /* Is it a task-gate? */
4806 if (fTaskGate)
4807 {
4808 /*
4809 * Construct the error code masks based on what caused this task switch.
4810 * See Intel Instruction reference for INT.
4811 */
4812 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4813 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4814 RTSEL SelTSS = Idte.Gate.u16Sel;
4815
4816 /*
4817 * Fetch the TSS descriptor in the GDT.
4818 */
4819 IEMSELDESC DescTSS;
4820 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4821 if (rcStrict != VINF_SUCCESS)
4822 {
4823 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4824 VBOXSTRICTRC_VAL(rcStrict)));
4825 return rcStrict;
4826 }
4827
4828 /* The TSS descriptor must be a system segment and be available (not busy). */
4829 if ( DescTSS.Legacy.Gen.u1DescType
4830 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4831 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4832 {
4833 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4834 u8Vector, SelTSS, DescTSS.Legacy.au64));
4835 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4836 }
4837
4838 /* The TSS must be present. */
4839 if (!DescTSS.Legacy.Gen.u1Present)
4840 {
4841 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4842 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4843 }
4844
4845 /* Do the actual task switch. */
4846 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4847 }
4848
4849 /* A null CS is bad. */
4850 RTSEL NewCS = Idte.Gate.u16Sel;
4851 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4852 {
4853 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4854 return iemRaiseGeneralProtectionFault0(pVCpu);
4855 }
4856
4857 /* Fetch the descriptor for the new CS. */
4858 IEMSELDESC DescCS;
4859 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4860 if (rcStrict != VINF_SUCCESS)
4861 {
4862 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4863 return rcStrict;
4864 }
4865
4866 /* Must be a code segment. */
4867 if (!DescCS.Legacy.Gen.u1DescType)
4868 {
4869 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4870 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4871 }
4872 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4873 {
4874 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4875 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4876 }
4877
4878 /* Don't allow lowering the privilege level. */
4879 /** @todo Does the lowering of privileges apply to software interrupts
4880 * only? This has bearings on the more-privileged or
4881 * same-privilege stack behavior further down. A testcase would
4882 * be nice. */
4883 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4884 {
4885 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4886 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4887 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4888 }
4889
4890 /* Make sure the selector is present. */
4891 if (!DescCS.Legacy.Gen.u1Present)
4892 {
4893 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4894 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4895 }
4896
4897 /* Check the new EIP against the new CS limit. */
4898 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4899 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4900 ? Idte.Gate.u16OffsetLow
4901 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4902 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4903 if (uNewEip > cbLimitCS)
4904 {
4905 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4906 u8Vector, uNewEip, cbLimitCS, NewCS));
4907 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4908 }
4909 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4910
4911 /* Calc the flag image to push. */
4912 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4913 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4914 fEfl &= ~X86_EFL_RF;
4915 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4916 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4917
4918 /* From V8086 mode only go to CPL 0. */
4919 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4920 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4921 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4922 {
4923 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4924 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4925 }
4926
4927 /*
4928 * If the privilege level changes, we need to get a new stack from the TSS.
4929 * This in turn means validating the new SS and ESP...
4930 */
4931 if (uNewCpl != pVCpu->iem.s.uCpl)
4932 {
4933 RTSEL NewSS;
4934 uint32_t uNewEsp;
4935 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4936 if (rcStrict != VINF_SUCCESS)
4937 return rcStrict;
4938
4939 IEMSELDESC DescSS;
4940 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4941 if (rcStrict != VINF_SUCCESS)
4942 return rcStrict;
4943 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4944 if (!DescSS.Legacy.Gen.u1DefBig)
4945 {
4946 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4947 uNewEsp = (uint16_t)uNewEsp;
4948 }
4949
4950 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4951
4952 /* Check that there is sufficient space for the stack frame. */
4953 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4954 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4955 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4956 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
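    /*
     * Illustrative arithmetic for the expression above: a 32-bit gate
     * (f32BitGate=1) pushes EIP, CS, EFLAGS, ESP and SS as five dwords
     * (20 bytes), or six dwords (24 bytes) with an error code; when
     * interrupting V8086 code the frame additionally holds ES, DS, FS and GS,
     * giving 36 or 40 bytes. A 16-bit gate pushes the same entries as words,
     * i.e. half those sizes.
     */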
4957
4958 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4959 {
4960 if ( uNewEsp - 1 > cbLimitSS
4961 || uNewEsp < cbStackFrame)
4962 {
4963 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4964 u8Vector, NewSS, uNewEsp, cbStackFrame));
4965 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4966 }
4967 }
4968 else
4969 {
4970 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4971 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4972 {
4973 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4974 u8Vector, NewSS, uNewEsp, cbStackFrame));
4975 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4976 }
4977 }
4978
4979 /*
4980 * Start making changes.
4981 */
4982
4983 /* Set the new CPL so that stack accesses use it. */
4984 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4985 pVCpu->iem.s.uCpl = uNewCpl;
4986
4987 /* Create the stack frame. */
4988 RTPTRUNION uStackFrame;
4989 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4990 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4991 if (rcStrict != VINF_SUCCESS)
4992 return rcStrict;
4993 void * const pvStackFrame = uStackFrame.pv;
4994 if (f32BitGate)
4995 {
4996 if (fFlags & IEM_XCPT_FLAGS_ERR)
4997 *uStackFrame.pu32++ = uErr;
4998 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4999 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5000 uStackFrame.pu32[2] = fEfl;
5001 uStackFrame.pu32[3] = pCtx->esp;
5002 uStackFrame.pu32[4] = pCtx->ss.Sel;
5003 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
5004 if (fEfl & X86_EFL_VM)
5005 {
5006 uStackFrame.pu32[1] = pCtx->cs.Sel;
5007 uStackFrame.pu32[5] = pCtx->es.Sel;
5008 uStackFrame.pu32[6] = pCtx->ds.Sel;
5009 uStackFrame.pu32[7] = pCtx->fs.Sel;
5010 uStackFrame.pu32[8] = pCtx->gs.Sel;
5011 }
5012 }
5013 else
5014 {
5015 if (fFlags & IEM_XCPT_FLAGS_ERR)
5016 *uStackFrame.pu16++ = uErr;
5017 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
5018 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5019 uStackFrame.pu16[2] = fEfl;
5020 uStackFrame.pu16[3] = pCtx->sp;
5021 uStackFrame.pu16[4] = pCtx->ss.Sel;
5022 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
5023 if (fEfl & X86_EFL_VM)
5024 {
5025 uStackFrame.pu16[1] = pCtx->cs.Sel;
5026 uStackFrame.pu16[5] = pCtx->es.Sel;
5027 uStackFrame.pu16[6] = pCtx->ds.Sel;
5028 uStackFrame.pu16[7] = pCtx->fs.Sel;
5029 uStackFrame.pu16[8] = pCtx->gs.Sel;
5030 }
5031 }
5032 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5033 if (rcStrict != VINF_SUCCESS)
5034 return rcStrict;
5035
5036 /* Mark the selectors 'accessed' (hope this is the correct time). */
5037 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5038 * after pushing the stack frame? (Write protect the gdt + stack to
5039 * find out.) */
5040 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5041 {
5042 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5043 if (rcStrict != VINF_SUCCESS)
5044 return rcStrict;
5045 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5046 }
5047
5048 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5049 {
5050 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5051 if (rcStrict != VINF_SUCCESS)
5052 return rcStrict;
5053 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5054 }
5055
5056 /*
5057 * Start committing the register changes (joins with the DPL=CPL branch).
5058 */
5059 pCtx->ss.Sel = NewSS;
5060 pCtx->ss.ValidSel = NewSS;
5061 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5062 pCtx->ss.u32Limit = cbLimitSS;
5063 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5064 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5065 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5066 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5067 * SP is loaded).
5068 * Need to check the other combinations too:
5069 * - 16-bit TSS, 32-bit handler
5070 * - 32-bit TSS, 16-bit handler */
5071 if (!pCtx->ss.Attr.n.u1DefBig)
5072 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5073 else
5074 pCtx->rsp = uNewEsp - cbStackFrame;
5075
5076 if (fEfl & X86_EFL_VM)
5077 {
5078 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5079 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5080 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5081 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5082 }
5083 }
5084 /*
5085 * Same privilege, no stack change and smaller stack frame.
5086 */
5087 else
5088 {
5089 uint64_t uNewRsp;
5090 RTPTRUNION uStackFrame;
5091 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
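        /*
         * Illustrative arithmetic: with no privilege change only EIP, CS and
         * EFLAGS (plus an optional error code) are pushed, i.e. 6 or 8 bytes
         * through a 16-bit gate and 12 or 16 bytes through a 32-bit gate.
         */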
5092 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5093 if (rcStrict != VINF_SUCCESS)
5094 return rcStrict;
5095 void * const pvStackFrame = uStackFrame.pv;
5096
5097 if (f32BitGate)
5098 {
5099 if (fFlags & IEM_XCPT_FLAGS_ERR)
5100 *uStackFrame.pu32++ = uErr;
5101 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5102 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5103 uStackFrame.pu32[2] = fEfl;
5104 }
5105 else
5106 {
5107 if (fFlags & IEM_XCPT_FLAGS_ERR)
5108 *uStackFrame.pu16++ = uErr;
5109 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5110 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5111 uStackFrame.pu16[2] = fEfl;
5112 }
5113 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5114 if (rcStrict != VINF_SUCCESS)
5115 return rcStrict;
5116
5117 /* Mark the CS selector as 'accessed'. */
5118 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5119 {
5120 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5121 if (rcStrict != VINF_SUCCESS)
5122 return rcStrict;
5123 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5124 }
5125
5126 /*
5127 * Start committing the register changes (joins with the other branch).
5128 */
5129 pCtx->rsp = uNewRsp;
5130 }
5131
5132 /* ... register committing continues. */
5133 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5134 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5135 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5136 pCtx->cs.u32Limit = cbLimitCS;
5137 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5138 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5139
5140 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5141 fEfl &= ~fEflToClear;
5142 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5143
5144 if (fFlags & IEM_XCPT_FLAGS_CR2)
5145 pCtx->cr2 = uCr2;
5146
5147 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5148 iemRaiseXcptAdjustState(pCtx, u8Vector);
5149
5150 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5151}
5152
5153
5154/**
5155 * Implements exceptions and interrupts for long mode.
5156 *
5157 * @returns VBox strict status code.
5158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5159 * @param pCtx The CPU context.
5160 * @param cbInstr The number of bytes to offset rIP by in the return
5161 * address.
5162 * @param u8Vector The interrupt / exception vector number.
5163 * @param fFlags The flags.
5164 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5165 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5166 */
5167IEM_STATIC VBOXSTRICTRC
5168iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5169 PCPUMCTX pCtx,
5170 uint8_t cbInstr,
5171 uint8_t u8Vector,
5172 uint32_t fFlags,
5173 uint16_t uErr,
5174 uint64_t uCr2)
5175{
5176 /*
5177 * Read the IDT entry.
5178 */
5179 uint16_t offIdt = (uint16_t)u8Vector << 4;
5180 if (pCtx->idtr.cbIdt < offIdt + 7)
5181 {
5182 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5183 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5184 }
5185 X86DESC64 Idte;
5186 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5187 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5188 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5189 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5190 return rcStrict;
5191 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5192 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5193 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
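    /*
     * Illustrative offset calculation: long mode IDT entries are 16 bytes, so
     * for u8Vector = 0x0e the two 8-byte reads above fetch bytes 0xe0..0xef of
     * the IDT (offIdt = 0x0e << 4 = 0xe0).
     */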
5194
5195 /*
5196 * Check the descriptor type, DPL and such.
5197 * ASSUMES this is done in the same order as described for call-gate calls.
5198 */
5199 if (Idte.Gate.u1DescType)
5200 {
5201 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5202 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5203 }
5204 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5205 switch (Idte.Gate.u4Type)
5206 {
5207 case AMD64_SEL_TYPE_SYS_INT_GATE:
5208 fEflToClear |= X86_EFL_IF;
5209 break;
5210 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5211 break;
5212
5213 default:
5214 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5215 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5216 }
5217
5218 /* Check DPL against CPL if applicable. */
5219 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5220 {
5221 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5222 {
5223 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5224 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5225 }
5226 }
5227
5228 /* Is it there? */
5229 if (!Idte.Gate.u1Present)
5230 {
5231 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5232 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5233 }
5234
5235 /* A null CS is bad. */
5236 RTSEL NewCS = Idte.Gate.u16Sel;
5237 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5238 {
5239 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5240 return iemRaiseGeneralProtectionFault0(pVCpu);
5241 }
5242
5243 /* Fetch the descriptor for the new CS. */
5244 IEMSELDESC DescCS;
5245 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5246 if (rcStrict != VINF_SUCCESS)
5247 {
5248 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5249 return rcStrict;
5250 }
5251
5252 /* Must be a 64-bit code segment. */
5253 if (!DescCS.Long.Gen.u1DescType)
5254 {
5255 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5256 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5257 }
5258 if ( !DescCS.Long.Gen.u1Long
5259 || DescCS.Long.Gen.u1DefBig
5260 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5261 {
5262 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5263 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5264 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5265 }
5266
5267 /* Don't allow lowering the privilege level. For non-conforming CS
5268 selectors, the CS.DPL sets the privilege level the trap/interrupt
5269 handler runs at. For conforming CS selectors, the CPL remains
5270 unchanged, but the CS.DPL must be <= CPL. */
5271 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5272 * when CPU in Ring-0. Result \#GP? */
5273 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5274 {
5275 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5276 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5277 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5278 }
5279
5280
5281 /* Make sure the selector is present. */
5282 if (!DescCS.Legacy.Gen.u1Present)
5283 {
5284 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5285 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5286 }
5287
5288 /* Check that the new RIP is canonical. */
5289 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5290 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5291 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5292 if (!IEM_IS_CANONICAL(uNewRip))
5293 {
5294 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5295 return iemRaiseGeneralProtectionFault0(pVCpu);
5296 }
5297
5298 /*
5299 * If the privilege level changes or if the IST isn't zero, we need to get
5300 * a new stack from the TSS.
5301 */
5302 uint64_t uNewRsp;
5303 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5304 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5305 if ( uNewCpl != pVCpu->iem.s.uCpl
5306 || Idte.Gate.u3IST != 0)
5307 {
5308 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5309 if (rcStrict != VINF_SUCCESS)
5310 return rcStrict;
5311 }
5312 else
5313 uNewRsp = pCtx->rsp;
5314 uNewRsp &= ~(uint64_t)0xf;
5315
5316 /*
5317 * Calc the flag image to push.
5318 */
5319 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5320 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5321 fEfl &= ~X86_EFL_RF;
5322 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5323 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5324
5325 /*
5326 * Start making changes.
5327 */
5328 /* Set the new CPL so that stack accesses use it. */
5329 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5330 pVCpu->iem.s.uCpl = uNewCpl;
5331
5332 /* Create the stack frame. */
5333 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
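    /*
     * Illustrative layout: the frame built below is five qwords - SS, RSP,
     * RFLAGS, CS and RIP - i.e. 40 bytes, or six qwords (48 bytes) when an
     * error code is pushed first, placed just below the 16-byte aligned
     * uNewRsp computed above.
     */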
5334 RTPTRUNION uStackFrame;
5335 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5336 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5337 if (rcStrict != VINF_SUCCESS)
5338 return rcStrict;
5339 void * const pvStackFrame = uStackFrame.pv;
5340
5341 if (fFlags & IEM_XCPT_FLAGS_ERR)
5342 *uStackFrame.pu64++ = uErr;
5343 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5344 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5345 uStackFrame.pu64[2] = fEfl;
5346 uStackFrame.pu64[3] = pCtx->rsp;
5347 uStackFrame.pu64[4] = pCtx->ss.Sel;
5348 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5349 if (rcStrict != VINF_SUCCESS)
5350 return rcStrict;
5351
5352 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5353 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5354 * after pushing the stack frame? (Write protect the gdt + stack to
5355 * find out.) */
5356 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5357 {
5358 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5359 if (rcStrict != VINF_SUCCESS)
5360 return rcStrict;
5361 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5362 }
5363
5364 /*
5365 * Start committing the register changes.
5366 */
5367 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
5368 * hidden registers when interrupting 32-bit or 16-bit code! */
5369 if (uNewCpl != uOldCpl)
5370 {
5371 pCtx->ss.Sel = 0 | uNewCpl;
5372 pCtx->ss.ValidSel = 0 | uNewCpl;
5373 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5374 pCtx->ss.u32Limit = UINT32_MAX;
5375 pCtx->ss.u64Base = 0;
5376 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5377 }
5378 pCtx->rsp = uNewRsp - cbStackFrame;
5379 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5380 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5381 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5382 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5383 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5384 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5385 pCtx->rip = uNewRip;
5386
5387 fEfl &= ~fEflToClear;
5388 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5389
5390 if (fFlags & IEM_XCPT_FLAGS_CR2)
5391 pCtx->cr2 = uCr2;
5392
5393 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5394 iemRaiseXcptAdjustState(pCtx, u8Vector);
5395
5396 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5397}
5398
5399
5400/**
5401 * Implements exceptions and interrupts.
5402 *
5403 * All exceptions and interrupts go through this function!
5404 *
5405 * @returns VBox strict status code.
5406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5407 * @param cbInstr The number of bytes to offset rIP by in the return
5408 * address.
5409 * @param u8Vector The interrupt / exception vector number.
5410 * @param fFlags The flags.
5411 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5412 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5413 */
5414DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5415iemRaiseXcptOrInt(PVMCPU pVCpu,
5416 uint8_t cbInstr,
5417 uint8_t u8Vector,
5418 uint32_t fFlags,
5419 uint16_t uErr,
5420 uint64_t uCr2)
5421{
5422 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5423#ifdef IN_RING0
5424 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5425 AssertRCReturn(rc, rc);
5426#endif
5427
5428#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5429 /*
5430 * Flush prefetch buffer
5431 */
5432 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5433#endif
5434
5435 /*
5436 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5437 */
5438 if ( pCtx->eflags.Bits.u1VM
5439 && pCtx->eflags.Bits.u2IOPL != 3
5440 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5441 && (pCtx->cr0 & X86_CR0_PE) )
5442 {
5443 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5444 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5445 u8Vector = X86_XCPT_GP;
5446 uErr = 0;
5447 }
5448#ifdef DBGFTRACE_ENABLED
5449 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5450 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5451 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5452#endif
5453
5454#ifdef VBOX_WITH_NESTED_HWVIRT
5455 if (IEM_IS_SVM_ENABLED(pVCpu))
5456 {
5457 /*
5458 * If the event is being injected as part of VMRUN, it isn't subject to event
5459 * intercepts in the nested-guest. However, secondary exceptions that occur
5460 * during injection of any event -are- subject to exception intercepts.
5461 * See AMD spec. 15.20 "Event Injection".
5462 */
5463 if (!pCtx->hwvirt.svm.fInterceptEvents)
5464 pCtx->hwvirt.svm.fInterceptEvents = 1;
5465 else
5466 {
5467 /*
5468 * Check and handle if the event being raised is intercepted.
5469 */
5470 VBOXSTRICTRC rcStrict0 = iemHandleSvmNstGstEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5471 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5472 return rcStrict0;
5473 }
5474 }
5475#endif /* VBOX_WITH_NESTED_HWVIRT */
5476
5477 /*
5478 * Do recursion accounting.
5479 */
5480 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5481 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5482 if (pVCpu->iem.s.cXcptRecursions == 0)
5483 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5484 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5485 else
5486 {
5487 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5488 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5489 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5490
5491 if (pVCpu->iem.s.cXcptRecursions >= 3)
5492 {
5493#ifdef DEBUG_bird
5494 AssertFailed();
5495#endif
5496 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5497 }
5498
5499 /*
5500 * Evaluate the sequence of recurring events.
5501 */
5502 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5503 NULL /* pXcptRaiseInfo */);
5504 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5505 { /* likely */ }
5506 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5507 {
5508 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5509 u8Vector = X86_XCPT_DF;
5510 uErr = 0;
5511 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5512 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5513 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5514 }
5515 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5516 {
5517 Log2(("iemRaiseXcptOrInt: raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5518 return iemInitiateCpuShutdown(pVCpu);
5519 }
5520 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5521 {
5522 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5523 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5524 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5525 return VERR_EM_GUEST_CPU_HANG;
5526 }
5527 else
5528 {
5529 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5530 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5531 return VERR_IEM_IPE_9;
5532 }
5533
5534 /*
5535 * The 'EXT' bit is set when an exception occurs during delivery of an external
5536 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5537 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5538 * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
5539 *
5540 * [1] - Intel spec. 6.13 "Error Code"
5541 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5542 * [3] - Intel Instruction reference for INT n.
5543 */
5544 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5545 && (fFlags & IEM_XCPT_FLAGS_ERR)
5546 && u8Vector != X86_XCPT_PF
5547 && u8Vector != X86_XCPT_DF)
5548 {
5549 uErr |= X86_TRAP_ERR_EXTERNAL;
5550 }
5551 }
5552
5553 pVCpu->iem.s.cXcptRecursions++;
5554 pVCpu->iem.s.uCurXcpt = u8Vector;
5555 pVCpu->iem.s.fCurXcpt = fFlags;
5556 pVCpu->iem.s.uCurXcptErr = uErr;
5557 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5558
5559 /*
5560 * Extensive logging.
5561 */
5562#if defined(LOG_ENABLED) && defined(IN_RING3)
5563 if (LogIs3Enabled())
5564 {
5565 PVM pVM = pVCpu->CTX_SUFF(pVM);
5566 char szRegs[4096];
5567 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5568 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5569 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5570 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5571 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5572 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5573 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5574 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5575 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5576 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5577 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5578 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5579 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5580 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5581 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5582 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5583 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5584 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5585 " efer=%016VR{efer}\n"
5586 " pat=%016VR{pat}\n"
5587 " sf_mask=%016VR{sf_mask}\n"
5588 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5589 " lstar=%016VR{lstar}\n"
5590 " star=%016VR{star} cstar=%016VR{cstar}\n"
5591 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5592 );
5593
5594 char szInstr[256];
5595 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5596 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5597 szInstr, sizeof(szInstr), NULL);
5598 Log3(("%s%s\n", szRegs, szInstr));
5599 }
5600#endif /* LOG_ENABLED */
5601
5602 /*
5603 * Call the mode specific worker function.
5604 */
5605 VBOXSTRICTRC rcStrict;
5606 if (!(pCtx->cr0 & X86_CR0_PE))
5607 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5608 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5609 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5610 else
5611 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5612
5613 /* Flush the prefetch buffer. */
5614#ifdef IEM_WITH_CODE_TLB
5615 pVCpu->iem.s.pbInstrBuf = NULL;
5616#else
5617 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5618#endif
5619
5620 /*
5621 * Unwind.
5622 */
5623 pVCpu->iem.s.cXcptRecursions--;
5624 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5625 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5626 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5627 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5628 return rcStrict;
5629}
5630
5631#ifdef IEM_WITH_SETJMP
5632/**
5633 * See iemRaiseXcptOrInt. Will not return.
5634 */
5635IEM_STATIC DECL_NO_RETURN(void)
5636iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5637 uint8_t cbInstr,
5638 uint8_t u8Vector,
5639 uint32_t fFlags,
5640 uint16_t uErr,
5641 uint64_t uCr2)
5642{
5643 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5644 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5645}
5646#endif
5647
5648
5649/** \#DE - 00. */
5650DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5651{
5652 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5653}
5654
5655
5656/** \#DB - 01.
5657 * @note This automatically clears DR7.GD. */
5658DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5659{
5660 /** @todo set/clear RF. */
5661 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5662 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5663}
5664
5665
5666/** \#BR - 05. */
5667DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5668{
5669 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5670}
5671
5672
5673/** \#UD - 06. */
5674DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5675{
5676 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5677}
5678
5679
5680/** \#NM - 07. */
5681DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5682{
5683 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5684}
5685
5686
5687/** \#TS(err) - 0a. */
5688DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5689{
5690 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5691}
5692
5693
5694/** \#TS(tr) - 0a. */
5695DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5696{
5697 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5698 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5699}
5700
5701
5702/** \#TS(0) - 0a. */
5703DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5704{
5705 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5706 0, 0);
5707}
5708
5709
5710/** \#TS(err) - 0a. */
5711DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5712{
5713 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5714 uSel & X86_SEL_MASK_OFF_RPL, 0);
5715}
5716
5717
5718/** \#NP(err) - 0b. */
5719DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5720{
5721 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5722}
5723
5724
5725/** \#NP(sel) - 0b. */
5726DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5727{
5728 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5729 uSel & ~X86_SEL_RPL, 0);
5730}
5731
5732
5733/** \#SS(seg) - 0c. */
5734DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5735{
5736 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5737 uSel & ~X86_SEL_RPL, 0);
5738}
5739
5740
5741/** \#SS(err) - 0c. */
5742DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5743{
5744 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5745}
5746
5747
5748/** \#GP(n) - 0d. */
5749DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5750{
5751 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5752}
5753
5754
5755/** \#GP(0) - 0d. */
5756DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5757{
5758 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5759}
5760
5761#ifdef IEM_WITH_SETJMP
5762/** \#GP(0) - 0d. */
5763DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5764{
5765 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5766}
5767#endif
5768
5769
5770/** \#GP(sel) - 0d. */
5771DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5772{
5773 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5774 Sel & ~X86_SEL_RPL, 0);
5775}
5776
5777
5778/** \#GP(0) - 0d. */
5779DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5780{
5781 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5782}
5783
5784
5785/** \#GP(sel) - 0d. */
5786DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5787{
5788 NOREF(iSegReg); NOREF(fAccess);
5789 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5790 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5791}
5792
5793#ifdef IEM_WITH_SETJMP
5794/** \#GP(sel) - 0d, longjmp. */
5795DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5796{
5797 NOREF(iSegReg); NOREF(fAccess);
5798 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5799 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5800}
5801#endif
5802
5803/** \#GP(sel) - 0d. */
5804DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5805{
5806 NOREF(Sel);
5807 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5808}
5809
5810#ifdef IEM_WITH_SETJMP
5811/** \#GP(sel) - 0d, longjmp. */
5812DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5813{
5814 NOREF(Sel);
5815 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5816}
5817#endif
5818
5819
5820/** \#GP(sel) - 0d. */
5821DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5822{
5823 NOREF(iSegReg); NOREF(fAccess);
5824 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5825}
5826
5827#ifdef IEM_WITH_SETJMP
5828/** \#GP(sel) - 0d, longjmp. */
5829DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5830 uint32_t fAccess)
5831{
5832 NOREF(iSegReg); NOREF(fAccess);
5833 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5834}
5835#endif
5836
5837
5838/** \#PF(n) - 0e. */
5839DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5840{
5841 uint16_t uErr;
5842 switch (rc)
5843 {
5844 case VERR_PAGE_NOT_PRESENT:
5845 case VERR_PAGE_TABLE_NOT_PRESENT:
5846 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5847 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5848 uErr = 0;
5849 break;
5850
5851 default:
5852 AssertMsgFailed(("%Rrc\n", rc));
5853 /* fall thru */
5854 case VERR_ACCESS_DENIED:
5855 uErr = X86_TRAP_PF_P;
5856 break;
5857
5858 /** @todo reserved */
5859 }
5860
5861 if (pVCpu->iem.s.uCpl == 3)
5862 uErr |= X86_TRAP_PF_US;
5863
5864 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5865 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5866 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5867 uErr |= X86_TRAP_PF_ID;
5868
5869#if 0 /* This is so much non-sense, really. Why was it done like that? */
5870 /* Note! RW access callers reporting a WRITE protection fault will clear
5871 the READ flag before calling. So, read-modify-write accesses (RW)
5872 can safely be reported as READ faults. */
5873 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5874 uErr |= X86_TRAP_PF_RW;
5875#else
5876 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5877 {
5878 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5879 uErr |= X86_TRAP_PF_RW;
5880 }
5881#endif
5882
5883 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5884 uErr, GCPtrWhere);
5885}
5886
5887#ifdef IEM_WITH_SETJMP
5888/** \#PF(n) - 0e, longjmp. */
5889IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5890{
5891 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5892}
5893#endif
5894
5895
5896/** \#MF(0) - 10. */
5897DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5898{
5899 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5900}
5901
5902
5903/** \#AC(0) - 11. */
5904DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5905{
5906 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5907}
5908
5909
5910/**
5911 * Macro for calling iemCImplRaiseDivideError().
5912 *
5913 * This enables us to add/remove arguments and force different levels of
5914 * inlining as we wish.
5915 *
5916 * @return Strict VBox status code.
5917 */
5918#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5919IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5920{
5921 NOREF(cbInstr);
5922 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5923}
5924
5925
5926/**
5927 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5928 *
5929 * This enables us to add/remove arguments and force different levels of
5930 * inlining as we wish.
5931 *
5932 * @return Strict VBox status code.
5933 */
5934#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5935IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5936{
5937 NOREF(cbInstr);
5938 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5939}
5940
5941
5942/**
5943 * Macro for calling iemCImplRaiseInvalidOpcode().
5944 *
5945 * This enables us to add/remove arguments and force different levels of
5946 * inlining as we wish.
5947 *
5948 * @return Strict VBox status code.
5949 */
5950#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5951IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5952{
5953 NOREF(cbInstr);
5954 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5955}
5956
5957
5958/** @} */
5959
5960
5961/*
5962 *
5963 * Helper routines.
5964 * Helper routines.
5965 * Helper routines.
5966 *
5967 */
5968
5969/**
5970 * Recalculates the effective operand size.
5971 *
5972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5973 */
5974IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5975{
5976 switch (pVCpu->iem.s.enmCpuMode)
5977 {
5978 case IEMMODE_16BIT:
5979 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5980 break;
5981 case IEMMODE_32BIT:
5982 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5983 break;
5984 case IEMMODE_64BIT:
5985 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5986 {
5987 case 0:
5988 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5989 break;
5990 case IEM_OP_PRF_SIZE_OP:
5991 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5992 break;
5993 case IEM_OP_PRF_SIZE_REX_W:
5994 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5995 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5996 break;
5997 }
5998 break;
5999 default:
6000 AssertFailed();
6001 }
6002}
6003
6004
6005/**
6006 * Sets the default operand size to 64-bit and recalculates the effective
6007 * operand size.
6008 *
6009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6010 */
6011IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6012{
6013 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6014 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6015 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6016 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6017 else
6018 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6019}
6020
6021
6022/*
6023 *
6024 * Common opcode decoders.
6025 * Common opcode decoders.
6026 * Common opcode decoders.
6027 *
6028 */
6029//#include <iprt/mem.h>
6030
6031/**
6032 * Used to add extra details about a stub case.
6033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6034 */
6035IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6036{
6037#if defined(LOG_ENABLED) && defined(IN_RING3)
6038 PVM pVM = pVCpu->CTX_SUFF(pVM);
6039 char szRegs[4096];
6040 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6041 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6042 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6043 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6044 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6045 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6046 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6047 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6048 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6049 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6050 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6051 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6052 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6053 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6054 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6055 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6056 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6057 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6058 " efer=%016VR{efer}\n"
6059 " pat=%016VR{pat}\n"
6060 " sf_mask=%016VR{sf_mask}\n"
6061 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6062 " lstar=%016VR{lstar}\n"
6063 " star=%016VR{star} cstar=%016VR{cstar}\n"
6064 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6065 );
6066
6067 char szInstr[256];
6068 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6069 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6070 szInstr, sizeof(szInstr), NULL);
6071
6072 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6073#else
6074 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
6075#endif
6076}
6077
6078/**
6079 * Complains about a stub.
6080 *
6081 * Two versions of this macro are provided: one for daily use and one for use
6082 * when working on IEM.
6083 */
6084#if 0
6085# define IEMOP_BITCH_ABOUT_STUB() \
6086 do { \
6087 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6088 iemOpStubMsg2(pVCpu); \
6089 RTAssertPanic(); \
6090 } while (0)
6091#else
6092# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6093#endif
6094
6095/** Stubs an opcode. */
6096#define FNIEMOP_STUB(a_Name) \
6097 FNIEMOP_DEF(a_Name) \
6098 { \
6099 RT_NOREF_PV(pVCpu); \
6100 IEMOP_BITCH_ABOUT_STUB(); \
6101 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6102 } \
6103 typedef int ignore_semicolon
6104
6105/** Stubs an opcode. */
6106#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6107 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6108 { \
6109 RT_NOREF_PV(pVCpu); \
6110 RT_NOREF_PV(a_Name0); \
6111 IEMOP_BITCH_ABOUT_STUB(); \
6112 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6113 } \
6114 typedef int ignore_semicolon
6115
6116/** Stubs an opcode which currently should raise \#UD. */
6117#define FNIEMOP_UD_STUB(a_Name) \
6118 FNIEMOP_DEF(a_Name) \
6119 { \
6120 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6121 return IEMOP_RAISE_INVALID_OPCODE(); \
6122 } \
6123 typedef int ignore_semicolon
6124
6125/** Stubs an opcode which currently should raise \#UD. */
6126#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6127 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6128 { \
6129 RT_NOREF_PV(pVCpu); \
6130 RT_NOREF_PV(a_Name0); \
6131 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6132 return IEMOP_RAISE_INVALID_OPCODE(); \
6133 } \
6134 typedef int ignore_semicolon
6135
6136
6137
6138/** @name Register Access.
6139 * @{
6140 */
6141
6142/**
6143 * Gets a reference (pointer) to the specified hidden segment register.
6144 *
6145 * @returns Hidden register reference.
6146 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6147 * @param iSegReg The segment register.
6148 */
6149IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6150{
6151 Assert(iSegReg < X86_SREG_COUNT);
6152 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6153 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6154
6155#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6156 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6157 { /* likely */ }
6158 else
6159 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6160#else
6161 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6162#endif
6163 return pSReg;
6164}
6165
6166
6167/**
6168 * Ensures that the given hidden segment register is up to date.
6169 *
6170 * @returns Hidden register reference.
6171 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6172 * @param pSReg The segment register.
6173 */
6174IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6175{
6176#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6177 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6178 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6179#else
6180 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6181 NOREF(pVCpu);
6182#endif
6183 return pSReg;
6184}
6185
6186
6187/**
6188 * Gets a reference (pointer) to the specified segment register (the selector
6189 * value).
6190 *
6191 * @returns Pointer to the selector variable.
6192 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6193 * @param iSegReg The segment register.
6194 */
6195DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6196{
6197 Assert(iSegReg < X86_SREG_COUNT);
6198 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6199 return &pCtx->aSRegs[iSegReg].Sel;
6200}
6201
6202
6203/**
6204 * Fetches the selector value of a segment register.
6205 *
6206 * @returns The selector value.
6207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6208 * @param iSegReg The segment register.
6209 */
6210DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6211{
6212 Assert(iSegReg < X86_SREG_COUNT);
6213 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6214}
6215
6216
6217/**
6218 * Gets a reference (pointer) to the specified general purpose register.
6219 *
6220 * @returns Register reference.
6221 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6222 * @param iReg The general purpose register.
6223 */
6224DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6225{
6226 Assert(iReg < 16);
6227 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6228 return &pCtx->aGRegs[iReg];
6229}
6230
6231
6232/**
6233 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6234 *
6235 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6236 *
6237 * @returns Register reference.
6238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6239 * @param iReg The register.
6240 */
6241DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6242{
6243 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6244 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6245 {
6246 Assert(iReg < 16);
6247 return &pCtx->aGRegs[iReg].u8;
6248 }
6249 /* high 8-bit register. */
6250 Assert(iReg < 8);
6251 return &pCtx->aGRegs[iReg & 3].bHi;
6252}
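/*
 * Illustration only - a minimal sketch of the 8-bit register encoding that
 * iemGRegRefU8 implements: without a REX prefix, encodings 4-7 select the
 * legacy high-byte registers AH/CH/DH/BH of GPRs 0-3, while with any REX
 * prefix they select SPL/BPL/SIL/DIL instead.  The helper name is
 * hypothetical.
 */
#if 0 /* illustrative sketch, not built */
static const char *iemExampleByteRegName(uint8_t iReg, bool fHasRexPrefix)
{
    static const char * const s_apszLow[16] =
    {
        "al",  "cl",  "dl",   "bl",   "spl",  "bpl",  "sil",  "dil",
        "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b"
    };
    static const char * const s_apszHigh[4] = { "ah", "ch", "dh", "bh" };
    if (iReg < 4 || fHasRexPrefix)
        return s_apszLow[iReg & 15];
    return s_apszHigh[iReg & 3]; /* same iReg & 3 folding as the bHi access above */
}
#endif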
6253
6254
6255/**
6256 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6257 *
6258 * @returns Register reference.
6259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6260 * @param iReg The register.
6261 */
6262DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6263{
6264 Assert(iReg < 16);
6265 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6266 return &pCtx->aGRegs[iReg].u16;
6267}
6268
6269
6270/**
6271 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6272 *
6273 * @returns Register reference.
6274 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6275 * @param iReg The register.
6276 */
6277DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6278{
6279 Assert(iReg < 16);
6280 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6281 return &pCtx->aGRegs[iReg].u32;
6282}
6283
6284
6285/**
6286 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6287 *
6288 * @returns Register reference.
6289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6290 * @param iReg The register.
6291 */
6292DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6293{
6294 Assert(iReg < 16);
6295 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6296 return &pCtx->aGRegs[iReg].u64;
6297}
6298
6299
6300/**
6301 * Fetches the value of an 8-bit general purpose register.
6302 *
6303 * @returns The register value.
6304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6305 * @param iReg The register.
6306 */
6307DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6308{
6309 return *iemGRegRefU8(pVCpu, iReg);
6310}
6311
6312
6313/**
6314 * Fetches the value of a 16-bit general purpose register.
6315 *
6316 * @returns The register value.
6317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6318 * @param iReg The register.
6319 */
6320DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6321{
6322 Assert(iReg < 16);
6323 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6324}
6325
6326
6327/**
6328 * Fetches the value of a 32-bit general purpose register.
6329 *
6330 * @returns The register value.
6331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6332 * @param iReg The register.
6333 */
6334DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6335{
6336 Assert(iReg < 16);
6337 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6338}
6339
6340
6341/**
6342 * Fetches the value of a 64-bit general purpose register.
6343 *
6344 * @returns The register value.
6345 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6346 * @param iReg The register.
6347 */
6348DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6349{
6350 Assert(iReg < 16);
6351 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6352}
6353
6354
6355/**
6356 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6357 *
6358 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6359 * segment limit.
6360 *
 * @returns Strict VBox status code.
6361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6362 * @param offNextInstr The offset of the next instruction.
6363 */
6364IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6365{
6366 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6367 switch (pVCpu->iem.s.enmEffOpSize)
6368 {
6369 case IEMMODE_16BIT:
6370 {
6371 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6372 if ( uNewIp > pCtx->cs.u32Limit
6373 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6374 return iemRaiseGeneralProtectionFault0(pVCpu);
6375 pCtx->rip = uNewIp;
6376 break;
6377 }
6378
6379 case IEMMODE_32BIT:
6380 {
6381 Assert(pCtx->rip <= UINT32_MAX);
6382 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6383
6384 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6385 if (uNewEip > pCtx->cs.u32Limit)
6386 return iemRaiseGeneralProtectionFault0(pVCpu);
6387 pCtx->rip = uNewEip;
6388 break;
6389 }
6390
6391 case IEMMODE_64BIT:
6392 {
6393 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6394
6395 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6396 if (!IEM_IS_CANONICAL(uNewRip))
6397 return iemRaiseGeneralProtectionFault0(pVCpu);
6398 pCtx->rip = uNewRip;
6399 break;
6400 }
6401
6402 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6403 }
6404
6405 pCtx->eflags.Bits.u1RF = 0;
6406
6407#ifndef IEM_WITH_CODE_TLB
6408 /* Flush the prefetch buffer. */
6409 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6410#endif
6411
6412 return VINF_SUCCESS;
6413}
6414
6415
6416/**
6417 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6418 *
6419 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6420 * segment limit.
6421 *
6422 * @returns Strict VBox status code.
6423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6424 * @param offNextInstr The offset of the next instruction.
6425 */
6426IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6427{
6428 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6429 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6430
6431 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6432 if ( uNewIp > pCtx->cs.u32Limit
6433 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6434 return iemRaiseGeneralProtectionFault0(pVCpu);
6435 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6436 pCtx->rip = uNewIp;
6437 pCtx->eflags.Bits.u1RF = 0;
6438
6439#ifndef IEM_WITH_CODE_TLB
6440 /* Flush the prefetch buffer. */
6441 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6442#endif
6443
6444 return VINF_SUCCESS;
6445}
6446
6447
6448/**
6449 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6450 *
6451 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6452 * segment limit.
6453 *
6454 * @returns Strict VBox status code.
6455 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6456 * @param offNextInstr The offset of the next instruction.
6457 */
6458IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6459{
6460 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6461 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6462
6463 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6464 {
6465 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6466
6467 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6468 if (uNewEip > pCtx->cs.u32Limit)
6469 return iemRaiseGeneralProtectionFault0(pVCpu);
6470 pCtx->rip = uNewEip;
6471 }
6472 else
6473 {
6474 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6475
6476 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6477 if (!IEM_IS_CANONICAL(uNewRip))
6478 return iemRaiseGeneralProtectionFault0(pVCpu);
6479 pCtx->rip = uNewRip;
6480 }
6481 pCtx->eflags.Bits.u1RF = 0;
6482
6483#ifndef IEM_WITH_CODE_TLB
6484 /* Flush the prefetch buffer. */
6485 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6486#endif
6487
6488 return VINF_SUCCESS;
6489}
6490
6491
6492/**
6493 * Performs a near jump to the specified address.
6494 *
6495 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6496 * segment limit.
6497 *
 * @returns Strict VBox status code.
6498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6499 * @param uNewRip The new RIP value.
6500 */
6501IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6502{
6503 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6504 switch (pVCpu->iem.s.enmEffOpSize)
6505 {
6506 case IEMMODE_16BIT:
6507 {
6508 Assert(uNewRip <= UINT16_MAX);
6509 if ( uNewRip > pCtx->cs.u32Limit
6510 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6511 return iemRaiseGeneralProtectionFault0(pVCpu);
6512 /** @todo Test 16-bit jump in 64-bit mode. */
6513 pCtx->rip = uNewRip;
6514 break;
6515 }
6516
6517 case IEMMODE_32BIT:
6518 {
6519 Assert(uNewRip <= UINT32_MAX);
6520 Assert(pCtx->rip <= UINT32_MAX);
6521 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6522
6523 if (uNewRip > pCtx->cs.u32Limit)
6524 return iemRaiseGeneralProtectionFault0(pVCpu);
6525 pCtx->rip = uNewRip;
6526 break;
6527 }
6528
6529 case IEMMODE_64BIT:
6530 {
6531 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6532
6533 if (!IEM_IS_CANONICAL(uNewRip))
6534 return iemRaiseGeneralProtectionFault0(pVCpu);
6535 pCtx->rip = uNewRip;
6536 break;
6537 }
6538
6539 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6540 }
6541
6542 pCtx->eflags.Bits.u1RF = 0;
6543
6544#ifndef IEM_WITH_CODE_TLB
6545 /* Flush the prefetch buffer. */
6546 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6547#endif
6548
6549 return VINF_SUCCESS;
6550}
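/*
 * Illustration only - a minimal sketch of the canonical-address test the
 * 64-bit paths above rely on (IEM_IS_CANONICAL), assuming 48-bit linear
 * addresses: bits 63:47 must all equal bit 47.  The helper name is
 * hypothetical; CPUs with 5-level paging widen the check to 57 bits.
 */
#if 0 /* illustrative sketch, not built */
static bool iemExampleIsCanonical48(uint64_t uAddr)
{
    int64_t const iHigh = (int64_t)uAddr >> 47; /* arithmetic shift keeps the sign */
    return iHigh == 0 || iHigh == -1;
}
#endif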
6551
6552
6553/**
6554 * Gets the address of the top of the stack.
6555 *
 * @returns The effective top-of-stack address (SP, ESP or RSP as appropriate).
6556 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6557 * @param pCtx The CPU context from which SP/ESP/RSP should be
6558 * read.
6559 */
6560DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6561{
6562 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6563 return pCtx->rsp;
6564 if (pCtx->ss.Attr.n.u1DefBig)
6565 return pCtx->esp;
6566 return pCtx->sp;
6567}
6568
6569
6570/**
6571 * Updates the RIP/EIP/IP to point to the next instruction.
6572 *
6573 * This function leaves the EFLAGS.RF flag alone.
6574 *
6575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6576 * @param cbInstr The number of bytes to add.
6577 */
6578IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6579{
6580 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6581 switch (pVCpu->iem.s.enmCpuMode)
6582 {
6583 case IEMMODE_16BIT:
6584 Assert(pCtx->rip <= UINT16_MAX);
6585 pCtx->eip += cbInstr;
6586 pCtx->eip &= UINT32_C(0xffff);
6587 break;
6588
6589 case IEMMODE_32BIT:
6590 pCtx->eip += cbInstr;
6591 Assert(pCtx->rip <= UINT32_MAX);
6592 break;
6593
6594 case IEMMODE_64BIT:
6595 pCtx->rip += cbInstr;
6596 break;
6597 default: AssertFailed();
6598 }
6599}
6600
6601
6602#if 0
6603/**
6604 * Updates the RIP/EIP/IP to point to the next instruction.
6605 *
6606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6607 */
6608IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6609{
6610 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6611}
6612#endif
6613
6614
6615
6616/**
6617 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6618 *
6619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6620 * @param cbInstr The number of bytes to add.
6621 */
6622IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6623{
6624 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6625
6626 pCtx->eflags.Bits.u1RF = 0;
6627
6628 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6629#if ARCH_BITS >= 64
6630 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6631 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6632 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6633#else
6634 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6635 pCtx->rip += cbInstr;
6636 else
6637 {
6638 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6639 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6640 }
6641#endif
6642}
6643
6644
6645/**
6646 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6647 *
6648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6649 */
6650IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6651{
6652 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6653}
6654
6655
6656/**
6657 * Adds to the stack pointer.
6658 *
6659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6660 * @param pCtx The CPU context in which SP/ESP/RSP should be
6661 * updated.
6662 * @param cbToAdd The number of bytes to add (8-bit!).
6663 */
6664DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6665{
6666 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6667 pCtx->rsp += cbToAdd;
6668 else if (pCtx->ss.Attr.n.u1DefBig)
6669 pCtx->esp += cbToAdd;
6670 else
6671 pCtx->sp += cbToAdd;
6672}
6673
6674
6675/**
6676 * Subtracts from the stack pointer.
6677 *
6678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6679 * @param pCtx The CPU context in which SP/ESP/RSP should be
6680 * updated.
6681 * @param cbToSub The number of bytes to subtract (8-bit!).
6682 */
6683DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6684{
6685 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6686 pCtx->rsp -= cbToSub;
6687 else if (pCtx->ss.Attr.n.u1DefBig)
6688 pCtx->esp -= cbToSub;
6689 else
6690 pCtx->sp -= cbToSub;
6691}
6692
6693
6694/**
6695 * Adds to the temporary stack pointer.
6696 *
6697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6698 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6699 * @param cbToAdd The number of bytes to add (16-bit).
6700 * @param pCtx Where to get the current stack mode.
6701 */
6702DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6703{
6704 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6705 pTmpRsp->u += cbToAdd;
6706 else if (pCtx->ss.Attr.n.u1DefBig)
6707 pTmpRsp->DWords.dw0 += cbToAdd;
6708 else
6709 pTmpRsp->Words.w0 += cbToAdd;
6710}
6711
6712
6713/**
6714 * Subtracts from the temporary stack pointer.
6715 *
6716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6717 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6718 * @param cbToSub The number of bytes to subtract.
6719 * @param pCtx Where to get the current stack mode.
6720 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6721 * expecting that.
6722 */
6723DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6724{
6725 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6726 pTmpRsp->u -= cbToSub;
6727 else if (pCtx->ss.Attr.n.u1DefBig)
6728 pTmpRsp->DWords.dw0 -= cbToSub;
6729 else
6730 pTmpRsp->Words.w0 -= cbToSub;
6731}
6732
6733
6734/**
6735 * Calculates the effective stack address for a push of the specified size as
6736 * well as the new RSP value (upper bits may be masked).
6737 *
6738 * @returns Effective stack address for the push.
6739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6740 * @param pCtx Where to get the current stack mode.
6741 * @param cbItem The size of the stack item to push.
6742 * @param puNewRsp Where to return the new RSP value.
6743 */
6744DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6745{
6746 RTUINT64U uTmpRsp;
6747 RTGCPTR GCPtrTop;
6748 uTmpRsp.u = pCtx->rsp;
6749
6750 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6751 GCPtrTop = uTmpRsp.u -= cbItem;
6752 else if (pCtx->ss.Attr.n.u1DefBig)
6753 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6754 else
6755 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6756 *puNewRsp = uTmpRsp.u;
6757 return GCPtrTop;
6758}
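/*
 * Illustration only - a minimal sketch of the stack-width selection done
 * above: only the active part of RSP is decremented and allowed to wrap
 * (RSP in 64-bit mode, ESP when SS.D is set, otherwise SP).  The union and
 * helper are hypothetical stand-ins for RTUINT64U; little-endian layout is
 * assumed.
 */
#if 0 /* illustrative sketch, not built */
typedef union EXAMPLERSP
{
    uint64_t u;     /* full 64-bit value */
    uint32_t dw0;   /* low dword (like RTUINT64U::DWords.dw0 on little endian) */
    uint16_t w0;    /* low word  (like RTUINT64U::Words.w0) */
} EXAMPLERSP;

static uint64_t iemExamplePushAddr(EXAMPLERSP *pRsp, uint8_t cbItem, bool f64Bit, bool fSsDefBig)
{
    if (f64Bit)
        return pRsp->u -= cbItem;       /* whole RSP */
    if (fSsDefBig)
        return pRsp->dw0 -= cbItem;     /* only ESP wraps */
    return pRsp->w0 -= cbItem;          /* only SP wraps, e.g. 0x0000 -> 0xfffe for a word push */
}
#endif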
6759
6760
6761/**
6762 * Gets the current stack pointer and calculates the value after a pop of the
6763 * specified size.
6764 *
6765 * @returns Current stack pointer.
6766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6767 * @param pCtx Where to get the current stack mode.
6768 * @param cbItem The size of the stack item to pop.
6769 * @param puNewRsp Where to return the new RSP value.
6770 */
6771DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6772{
6773 RTUINT64U uTmpRsp;
6774 RTGCPTR GCPtrTop;
6775 uTmpRsp.u = pCtx->rsp;
6776
6777 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6778 {
6779 GCPtrTop = uTmpRsp.u;
6780 uTmpRsp.u += cbItem;
6781 }
6782 else if (pCtx->ss.Attr.n.u1DefBig)
6783 {
6784 GCPtrTop = uTmpRsp.DWords.dw0;
6785 uTmpRsp.DWords.dw0 += cbItem;
6786 }
6787 else
6788 {
6789 GCPtrTop = uTmpRsp.Words.w0;
6790 uTmpRsp.Words.w0 += cbItem;
6791 }
6792 *puNewRsp = uTmpRsp.u;
6793 return GCPtrTop;
6794}
6795
6796
6797/**
6798 * Calculates the effective stack address for a push of the specified size as
6799 * well as the new temporary RSP value (upper bits may be masked).
6800 *
6801 * @returns Effective stack address for the push.
6802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6803 * @param pCtx Where to get the current stack mode.
6804 * @param pTmpRsp The temporary stack pointer. This is updated.
6805 * @param cbItem The size of the stack item to push.
6806 */
6807DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6808{
6809 RTGCPTR GCPtrTop;
6810
6811 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6812 GCPtrTop = pTmpRsp->u -= cbItem;
6813 else if (pCtx->ss.Attr.n.u1DefBig)
6814 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6815 else
6816 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6817 return GCPtrTop;
6818}
6819
6820
6821/**
6822 * Gets the effective stack address for a pop of the specified size and
6823 * calculates and updates the temporary RSP.
6824 *
6825 * @returns Current stack pointer.
6826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6827 * @param pCtx Where to get the current stack mode.
6828 * @param pTmpRsp The temporary stack pointer. This is updated.
6829 * @param cbItem The size of the stack item to pop.
6830 */
6831DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6832{
6833 RTGCPTR GCPtrTop;
6834 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6835 {
6836 GCPtrTop = pTmpRsp->u;
6837 pTmpRsp->u += cbItem;
6838 }
6839 else if (pCtx->ss.Attr.n.u1DefBig)
6840 {
6841 GCPtrTop = pTmpRsp->DWords.dw0;
6842 pTmpRsp->DWords.dw0 += cbItem;
6843 }
6844 else
6845 {
6846 GCPtrTop = pTmpRsp->Words.w0;
6847 pTmpRsp->Words.w0 += cbItem;
6848 }
6849 return GCPtrTop;
6850}
6851
6852/** @} */
6853
6854
6855/** @name FPU access and helpers.
6856 *
6857 * @{
6858 */
6859
6860
6861/**
6862 * Hook for preparing to use the host FPU.
6863 *
6864 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6865 *
6866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6867 */
6868DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6869{
6870#ifdef IN_RING3
6871 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6872#else
6873 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6874#endif
6875}
6876
6877
6878/**
6879 * Hook for preparing to use the host FPU for SSE.
6880 *
6881 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6882 *
6883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6884 */
6885DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6886{
6887 iemFpuPrepareUsage(pVCpu);
6888}
6889
6890
6891/**
6892 * Hook for preparing to use the host FPU for AVX.
6893 *
6894 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6895 *
6896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6897 */
6898DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6899{
6900 iemFpuPrepareUsage(pVCpu);
6901}
6902
6903
6904/**
6905 * Hook for actualizing the guest FPU state before the interpreter reads it.
6906 *
6907 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6908 *
6909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6910 */
6911DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6912{
6913#ifdef IN_RING3
6914 NOREF(pVCpu);
6915#else
6916 CPUMRZFpuStateActualizeForRead(pVCpu);
6917#endif
6918}
6919
6920
6921/**
6922 * Hook for actualizing the guest FPU state before the interpreter changes it.
6923 *
6924 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6925 *
6926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6927 */
6928DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6929{
6930#ifdef IN_RING3
6931 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6932#else
6933 CPUMRZFpuStateActualizeForChange(pVCpu);
6934#endif
6935}
6936
6937
6938/**
6939 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6940 * only.
6941 *
6942 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6943 *
6944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6945 */
6946DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6947{
6948#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6949 NOREF(pVCpu);
6950#else
6951 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6952#endif
6953}
6954
6955
6956/**
6957 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6958 * read+write.
6959 *
6960 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6961 *
6962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6963 */
6964DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6965{
6966#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6967 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6968#else
6969 CPUMRZFpuStateActualizeForChange(pVCpu);
6970#endif
6971}
6972
6973
6974/**
6975 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6976 * only.
6977 *
6978 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6979 *
6980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6981 */
6982DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6983{
6984#ifdef IN_RING3
6985 NOREF(pVCpu);
6986#else
6987 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6988#endif
6989}
6990
6991
6992/**
6993 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6994 * read+write.
6995 *
6996 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6997 *
6998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6999 */
7000DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7001{
7002#ifdef IN_RING3
7003 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7004#else
7005 CPUMRZFpuStateActualizeForChange(pVCpu);
7006#endif
7007}
7008
7009
7010/**
7011 * Stores a QNaN value into a FPU register.
7012 *
7013 * @param pReg Pointer to the register.
7014 */
7015DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7016{
7017 pReg->au32[0] = UINT32_C(0x00000000);
7018 pReg->au32[1] = UINT32_C(0xc0000000);
7019 pReg->au16[4] = UINT16_C(0xffff);
7020}
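/*
 * Illustration only - the three stores above assemble the x87 "real
 * indefinite" QNaN: sign 1, exponent 0x7fff, significand 0xc000000000000000
 * (integer bit and quiet bit set), i.e. the 80-bit pattern
 * ffff'c0000000'00000000.  The struct and helper below are hypothetical and
 * just spell the same value out field by field.
 */
#if 0 /* illustrative sketch, not built */
typedef struct EXAMPLEF80
{
    uint64_t u64Mantissa;           /* fraction incl. explicit integer bit */
    uint16_t u16SignAndExponent;    /* bit 15 = sign, bits 14:0 = exponent */
} EXAMPLEF80;

static void iemExampleStoreRealIndefinite(EXAMPLEF80 *pReg)
{
    pReg->u64Mantissa        = UINT64_C(0xc000000000000000);
    pReg->u16SignAndExponent = UINT16_C(0xffff);
}
#endif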
7021
7022
7023/**
7024 * Updates the FOP, FPU.CS and FPUIP registers.
7025 *
7026 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7027 * @param pCtx The CPU context.
7028 * @param pFpuCtx The FPU context.
7029 */
7030DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
7031{
7032 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7033 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7034 /** @todo x87.CS and FPUIP need to be kept separately. */
7035 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7036 {
7037 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
7038 * are handled in real mode, based on the fnsave and fnstenv images. */
7039 pFpuCtx->CS = 0;
7040 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
7041 }
7042 else
7043 {
7044 pFpuCtx->CS = pCtx->cs.Sel;
7045 pFpuCtx->FPUIP = pCtx->rip;
7046 }
7047}
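/*
 * Illustration only - in real and V86 mode the worker above stores a
 * linear-style FPUIP derived from CS and EIP instead of a selector:offset
 * pair.  A minimal sketch of the classic real-mode address calculation
 * (linear = selector * 16 + offset) for comparison; the worker folds the
 * shifted selector in with an OR.  The helper name is hypothetical.
 */
#if 0 /* illustrative sketch, not built */
static uint32_t iemExampleRealModeLinear(uint16_t uSel, uint16_t uOff)
{
    return ((uint32_t)uSel << 4) + uOff;
}
#endif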
7048
7049
7050/**
7051 * Updates the x87.DS and FPUDP registers.
7052 *
7053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7054 * @param pCtx The CPU context.
7055 * @param pFpuCtx The FPU context.
7056 * @param iEffSeg The effective segment register.
7057 * @param GCPtrEff The effective address relative to @a iEffSeg.
7058 */
7059DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7060{
7061 RTSEL sel;
7062 switch (iEffSeg)
7063 {
7064 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7065 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7066 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7067 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7068 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7069 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7070 default:
7071 AssertMsgFailed(("%d\n", iEffSeg));
7072 sel = pCtx->ds.Sel;
7073 }
7074 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7075 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7076 {
7077 pFpuCtx->DS = 0;
7078 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7079 }
7080 else
7081 {
7082 pFpuCtx->DS = sel;
7083 pFpuCtx->FPUDP = GCPtrEff;
7084 }
7085}
7086
7087
7088/**
7089 * Rotates the stack registers in the push direction.
7090 *
7091 * @param pFpuCtx The FPU context.
7092 * @remarks This is a complete waste of time, but fxsave stores the registers in
7093 * stack order.
7094 */
7095DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7096{
7097 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7098 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7099 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7100 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7101 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7102 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7103 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7104 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7105 pFpuCtx->aRegs[0].r80 = r80Tmp;
7106}
7107
7108
7109/**
7110 * Rotates the stack registers in the pop direction.
7111 *
7112 * @param pFpuCtx The FPU context.
7113 * @remarks This is a complete waste of time, but fxsave stores the registers in
7114 * stack order.
7115 */
7116DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7117{
7118 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7119 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7120 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7121 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7122 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7123 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7124 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7125 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7126 pFpuCtx->aRegs[7].r80 = r80Tmp;
7127}
7128
7129
7130/**
7131 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7132 * exception prevents it.
7133 *
7134 * @param pResult The FPU operation result to push.
7135 * @param pFpuCtx The FPU context.
7136 */
7137IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7138{
7139 /* Update FSW and bail if there are pending exceptions afterwards. */
7140 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7141 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7142 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7143 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7144 {
7145 pFpuCtx->FSW = fFsw;
7146 return;
7147 }
7148
7149 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7150 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7151 {
7152 /* All is fine, push the actual value. */
7153 pFpuCtx->FTW |= RT_BIT(iNewTop);
7154 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7155 }
7156 else if (pFpuCtx->FCW & X86_FCW_IM)
7157 {
7158 /* Masked stack overflow, push QNaN. */
7159 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7160 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7161 }
7162 else
7163 {
7164 /* Raise stack overflow, don't push anything. */
7165 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7166 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7167 return;
7168 }
7169
7170 fFsw &= ~X86_FSW_TOP_MASK;
7171 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7172 pFpuCtx->FSW = fFsw;
7173
7174 iemFpuRotateStackPush(pFpuCtx);
7175}
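/*
 * Illustration only - a minimal sketch of the bail-out test used above: the
 * push is skipped when IE, DE or ZE is set in the merged FSW and its mask
 * bit (IM, DM, ZM) is clear in FCW.  The mask bits occupy the same low bit
 * positions in FCW as the flags do in FSW, so a single AND-NOT suffices.
 * The helper name and the spelled-out 0x0007 constant are for illustration.
 */
#if 0 /* illustrative sketch, not built */
static bool iemExampleHasUnmaskedPendingXcpt(uint16_t fFsw, uint16_t fFcw)
{
    uint16_t const fChecked = UINT16_C(0x0007); /* IE (bit 0), DE (bit 1), ZE (bit 2) */
    return ((fFsw & fChecked) & ~(fFcw & fChecked)) != 0;
}
#endif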
7176
7177
7178/**
7179 * Stores a result in a FPU register and updates the FSW and FTW.
7180 *
7181 * @param pFpuCtx The FPU context.
7182 * @param pResult The result to store.
7183 * @param iStReg Which FPU register to store it in.
7184 */
7185IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7186{
7187 Assert(iStReg < 8);
7188 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7189 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7190 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7191 pFpuCtx->FTW |= RT_BIT(iReg);
7192 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7193}
7194
7195
7196/**
7197 * Only updates the FPU status word (FSW) with the result of the current
7198 * instruction.
7199 *
7200 * @param pFpuCtx The FPU context.
7201 * @param u16FSW The FSW output of the current instruction.
7202 */
7203IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7204{
7205 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7206 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7207}
7208
7209
7210/**
7211 * Pops one item off the FPU stack if no pending exception prevents it.
7212 *
7213 * @param pFpuCtx The FPU context.
7214 */
7215IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7216{
7217 /* Check pending exceptions. */
7218 uint16_t uFSW = pFpuCtx->FSW;
7219 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7220 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7221 return;
7222
7223 /* TOP--. */
7224 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7225 uFSW &= ~X86_FSW_TOP_MASK;
7226 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7227 pFpuCtx->FSW = uFSW;
7228
7229 /* Mark the previous ST0 as empty. */
7230 iOldTop >>= X86_FSW_TOP_SHIFT;
7231 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7232
7233 /* Rotate the registers. */
7234 iemFpuRotateStackPop(pFpuCtx);
7235}
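/*
 * Illustration only - TOP lives in FSW bits 13:11, so the "+ 9" above (and
 * the "+ 7" used on the push paths) are simply +1 and -1 modulo 8 once the
 * field is masked.  A minimal sketch of the pop arithmetic with the shift
 * and mask written out; the constants mirror the x86 layout and the helper
 * name is hypothetical.
 */
#if 0 /* illustrative sketch, not built */
static uint16_t iemExampleFswAfterPop(uint16_t fFsw)
{
    unsigned const cShift = 11;             /* X86_FSW_TOP_SHIFT */
    uint16_t const fSMask = UINT16_C(7);    /* X86_FSW_TOP_SMASK */
    uint16_t       uTop   = (uint16_t)((fFsw >> cShift) & fSMask);
    uTop = (uint16_t)((uTop + 1) & fSMask); /* pop: TOP++ mod 8 */
    return (uint16_t)((fFsw & ~(fSMask << cShift)) | (uTop << cShift));
}
#endif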
7236
7237
7238/**
7239 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7240 *
7241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7242 * @param pResult The FPU operation result to push.
7243 */
7244IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7245{
7246 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7247 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7248 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7249 iemFpuMaybePushResult(pResult, pFpuCtx);
7250}
7251
7252
7253/**
7254 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7255 * and sets FPUDP and FPUDS.
7256 *
7257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7258 * @param pResult The FPU operation result to push.
7259 * @param iEffSeg The effective segment register.
7260 * @param GCPtrEff The effective address relative to @a iEffSeg.
7261 */
7262IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7263{
7264 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7265 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7266 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7267 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7268 iemFpuMaybePushResult(pResult, pFpuCtx);
7269}
7270
7271
7272/**
7273 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7274 * unless a pending exception prevents it.
7275 *
7276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7277 * @param pResult The FPU operation result to store and push.
7278 */
7279IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7280{
7281 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7282 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7283 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7284
7285 /* Update FSW and bail if there are pending exceptions afterwards. */
7286 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7287 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7288 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7289 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7290 {
7291 pFpuCtx->FSW = fFsw;
7292 return;
7293 }
7294
7295 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7296 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7297 {
7298 /* All is fine, push the actual value. */
7299 pFpuCtx->FTW |= RT_BIT(iNewTop);
7300 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7301 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7302 }
7303 else if (pFpuCtx->FCW & X86_FCW_IM)
7304 {
7305 /* Masked stack overflow, push QNaN. */
7306 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7307 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7308 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7309 }
7310 else
7311 {
7312 /* Raise stack overflow, don't push anything. */
7313 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7314 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7315 return;
7316 }
7317
7318 fFsw &= ~X86_FSW_TOP_MASK;
7319 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7320 pFpuCtx->FSW = fFsw;
7321
7322 iemFpuRotateStackPush(pFpuCtx);
7323}
7324
7325
7326/**
7327 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7328 * FOP.
7329 *
7330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7331 * @param pResult The result to store.
7332 * @param iStReg Which FPU register to store it in.
7333 */
7334IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7335{
7336 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7337 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7338 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7339 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7340}
7341
7342
7343/**
7344 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7345 * FOP, and then pops the stack.
7346 *
7347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7348 * @param pResult The result to store.
7349 * @param iStReg Which FPU register to store it in.
7350 */
7351IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7352{
7353 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7354 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7355 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7356 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7357 iemFpuMaybePopOne(pFpuCtx);
7358}
7359
7360
7361/**
7362 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7363 * FPUDP, and FPUDS.
7364 *
7365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7366 * @param pResult The result to store.
7367 * @param iStReg Which FPU register to store it in.
7368 * @param iEffSeg The effective memory operand selector register.
7369 * @param GCPtrEff The effective memory operand offset.
7370 */
7371IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7372 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7373{
7374 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7375 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7376 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7377 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7378 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7379}
7380
7381
7382/**
7383 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7384 * FPUDP, and FPUDS, and then pops the stack.
7385 *
7386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7387 * @param pResult The result to store.
7388 * @param iStReg Which FPU register to store it in.
7389 * @param iEffSeg The effective memory operand selector register.
7390 * @param GCPtrEff The effective memory operand offset.
7391 */
7392IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7393 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7394{
7395 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7396 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7397 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7398 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7399 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7400 iemFpuMaybePopOne(pFpuCtx);
7401}
7402
7403
7404/**
7405 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7406 *
7407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7408 */
7409IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7410{
7411 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7412 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7413 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7414}
7415
7416
7417/**
7418 * Marks the specified stack register as free (for FFREE).
7419 *
7420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7421 * @param iStReg The register to free.
7422 */
7423IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7424{
7425 Assert(iStReg < 8);
7426 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7427 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7428 pFpuCtx->FTW &= ~RT_BIT(iReg);
7429}
7430
7431
7432/**
7433 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7434 *
7435 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7436 */
7437IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7438{
7439 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7440 uint16_t uFsw = pFpuCtx->FSW;
7441 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7442 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7443 uFsw &= ~X86_FSW_TOP_MASK;
7444 uFsw |= uTop;
7445 pFpuCtx->FSW = uFsw;
7446}
7447
7448
7449/**
7450 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7451 *
7452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7453 */
7454IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7455{
7456 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7457 uint16_t uFsw = pFpuCtx->FSW;
7458 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7459 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7460 uFsw &= ~X86_FSW_TOP_MASK;
7461 uFsw |= uTop;
7462 pFpuCtx->FSW = uFsw;
7463}
7464
7465
7466/**
7467 * Updates the FSW, FOP, FPUIP, and FPUCS.
7468 *
7469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7470 * @param u16FSW The FSW from the current instruction.
7471 */
7472IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7473{
7474 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7475 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7476 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7477 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7478}
7479
7480
7481/**
7482 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7483 *
7484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7485 * @param u16FSW The FSW from the current instruction.
7486 */
7487IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7488{
7489 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7490 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7491 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7492 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7493 iemFpuMaybePopOne(pFpuCtx);
7494}
7495
7496
7497/**
7498 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7499 *
7500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7501 * @param u16FSW The FSW from the current instruction.
7502 * @param iEffSeg The effective memory operand selector register.
7503 * @param GCPtrEff The effective memory operand offset.
7504 */
7505IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7506{
7507 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7508 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7509 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7510 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7511 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7512}
7513
7514
7515/**
7516 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7517 *
7518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7519 * @param u16FSW The FSW from the current instruction.
7520 */
7521IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7522{
7523 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7524 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7525 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7526 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7527 iemFpuMaybePopOne(pFpuCtx);
7528 iemFpuMaybePopOne(pFpuCtx);
7529}
7530
7531
7532/**
7533 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7534 *
7535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7536 * @param u16FSW The FSW from the current instruction.
7537 * @param iEffSeg The effective memory operand selector register.
7538 * @param GCPtrEff The effective memory operand offset.
7539 */
7540IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7541{
7542 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7543 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7544 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7545 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7546 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7547 iemFpuMaybePopOne(pFpuCtx);
7548}
7549
7550
7551/**
7552 * Worker routine for raising an FPU stack underflow exception.
7553 *
7554 * @param pFpuCtx The FPU context.
7555 * @param iStReg The stack register being accessed.
7556 */
7557IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7558{
7559 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7560 if (pFpuCtx->FCW & X86_FCW_IM)
7561 {
7562 /* Masked underflow. */
7563 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7564 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7565 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7566 if (iStReg != UINT8_MAX)
7567 {
7568 pFpuCtx->FTW |= RT_BIT(iReg);
7569 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7570 }
7571 }
7572 else
7573 {
7574 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7575 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7576 }
7577}
7578
7579
7580/**
7581 * Raises a FPU stack underflow exception.
7582 *
7583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7584 * @param iStReg The destination register that should be loaded
7585 * with QNaN if \#IS is not masked. Specify
7586 * UINT8_MAX if none (like for fcom).
7587 */
7588DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7589{
7590 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7591 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7592 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7593 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7594}
7595
7596
7597DECL_NO_INLINE(IEM_STATIC, void)
7598iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7599{
7600 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7601 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7602 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7603 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7604 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7605}
7606
7607
7608DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7609{
7610 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7611 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7612 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7613 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7614 iemFpuMaybePopOne(pFpuCtx);
7615}
7616
7617
7618DECL_NO_INLINE(IEM_STATIC, void)
7619iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7620{
7621 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7622 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7623 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7624 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7625 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7626 iemFpuMaybePopOne(pFpuCtx);
7627}
7628
7629
7630DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7631{
7632 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7633 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7634 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7635 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7636 iemFpuMaybePopOne(pFpuCtx);
7637 iemFpuMaybePopOne(pFpuCtx);
7638}
7639
7640
7641DECL_NO_INLINE(IEM_STATIC, void)
7642iemFpuStackPushUnderflow(PVMCPU pVCpu)
7643{
7644 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7645 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7646 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7647
7648 if (pFpuCtx->FCW & X86_FCW_IM)
7649 {
7650 /* Masked underflow - Push QNaN. */
7651 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7652 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7653 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7654 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7655 pFpuCtx->FTW |= RT_BIT(iNewTop);
7656 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7657 iemFpuRotateStackPush(pFpuCtx);
7658 }
7659 else
7660 {
7661 /* Exception pending - don't change TOP or the register stack. */
7662 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7663 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7664 }
7665}
7666
7667
7668DECL_NO_INLINE(IEM_STATIC, void)
7669iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7670{
7671 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7672 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7673 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7674
7675 if (pFpuCtx->FCW & X86_FCW_IM)
7676 {
7677 /* Masked underflow - Push QNaN. */
7678 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7679 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7680 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7681 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7682 pFpuCtx->FTW |= RT_BIT(iNewTop);
7683 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7684 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7685 iemFpuRotateStackPush(pFpuCtx);
7686 }
7687 else
7688 {
7689 /* Exception pending - don't change TOP or the register stack. */
7690 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7691 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7692 }
7693}
7694
7695
7696/**
7697 * Worker routine for raising an FPU stack overflow exception on a push.
7698 *
7699 * @param pFpuCtx The FPU context.
7700 */
7701IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7702{
7703 if (pFpuCtx->FCW & X86_FCW_IM)
7704 {
7705 /* Masked overflow. */
7706 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7707 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7708 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7709 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7710 pFpuCtx->FTW |= RT_BIT(iNewTop);
7711 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7712 iemFpuRotateStackPush(pFpuCtx);
7713 }
7714 else
7715 {
7716 /* Exception pending - don't change TOP or the register stack. */
7717 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7718 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7719 }
7720}
7721
7722
7723/**
7724 * Raises a FPU stack overflow exception on a push.
7725 *
7726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7727 */
7728DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7729{
7730 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7731 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7732 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7733 iemFpuStackPushOverflowOnly(pFpuCtx);
7734}
7735
7736
7737/**
7738 * Raises a FPU stack overflow exception on a push with a memory operand.
7739 *
7740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7741 * @param iEffSeg The effective memory operand selector register.
7742 * @param GCPtrEff The effective memory operand offset.
7743 */
7744DECL_NO_INLINE(IEM_STATIC, void)
7745iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7746{
7747 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7748 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7749 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7750 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7751 iemFpuStackPushOverflowOnly(pFpuCtx);
7752}
7753
7754
7755IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7756{
7757 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7758 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7759 if (pFpuCtx->FTW & RT_BIT(iReg))
7760 return VINF_SUCCESS;
7761 return VERR_NOT_FOUND;
7762}
7763
7764
7765IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7766{
7767 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7768 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7769 if (pFpuCtx->FTW & RT_BIT(iReg))
7770 {
7771 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7772 return VINF_SUCCESS;
7773 }
7774 return VERR_NOT_FOUND;
7775}
7776
7777
7778IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7779 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7780{
7781 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7782 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7783 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7784 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7785 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7786 {
7787 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7788 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7789 return VINF_SUCCESS;
7790 }
7791 return VERR_NOT_FOUND;
7792}
7793
7794
7795IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7796{
7797 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7798 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7799 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7800 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7801 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7802 {
7803 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7804 return VINF_SUCCESS;
7805 }
7806 return VERR_NOT_FOUND;
7807}
7808
7809
7810/**
7811 * Updates the FPU exception status after FCW is changed.
7812 *
7813 * @param pFpuCtx The FPU context.
7814 */
7815IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7816{
7817 uint16_t u16Fsw = pFpuCtx->FSW;
7818 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7819 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7820 else
7821 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7822 pFpuCtx->FSW = u16Fsw;
7823}
7824
7825
7826/**
7827 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7828 *
7829 * @returns The full FTW.
7830 * @param pFpuCtx The FPU context.
7831 */
7832IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7833{
7834 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7835 uint16_t u16Ftw = 0;
7836 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7837 for (unsigned iSt = 0; iSt < 8; iSt++)
7838 {
7839 unsigned const iReg = (iSt + iTop) & 7;
7840 if (!(u8Ftw & RT_BIT(iReg)))
7841 u16Ftw |= 3 << (iReg * 2); /* empty */
7842 else
7843 {
7844 uint16_t uTag;
7845 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7846 if (pr80Reg->s.uExponent == 0x7fff)
7847 uTag = 2; /* Exponent is all 1's => Special. */
7848 else if (pr80Reg->s.uExponent == 0x0000)
7849 {
7850 if (pr80Reg->s.u64Mantissa == 0x0000)
7851 uTag = 1; /* All bits are zero => Zero. */
7852 else
7853 uTag = 2; /* Must be special. */
7854 }
7855 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7856 uTag = 0; /* Valid. */
7857 else
7858 uTag = 2; /* Must be special. */
7859
7860 u16Ftw |= uTag << (iReg * 2);
7861 }
7862 }
7863
7864 return u16Ftw;
7865}
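/*
 * Illustration only - the 2-bit tag values written above are: 0 = valid,
 * 1 = zero, 2 = special (NaN, infinity, denormal, unnormal), 3 = empty.
 * A minimal sketch of the same classification on explicit exponent and
 * mantissa fields; the helper name is hypothetical.
 */
#if 0 /* illustrative sketch, not built */
static unsigned iemExampleCalcTag(uint16_t uExponent, uint64_t u64Mantissa)
{
    if (uExponent == 0x7fff)
        return 2;                                   /* all-ones exponent => special */
    if (uExponent == 0)
        return u64Mantissa == 0 ? 1 : 2;            /* zero, else denormal/pseudo => special */
    return (u64Mantissa & RT_BIT_64(63)) ? 0 : 2;   /* J bit set => valid, clear => unnormal */
}
#endif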
7866
7867
7868/**
7869 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7870 *
7871 * @returns The compressed FTW.
7872 * @param u16FullFtw The full FTW to convert.
7873 */
7874IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7875{
7876 uint8_t u8Ftw = 0;
7877 for (unsigned i = 0; i < 8; i++)
7878 {
7879 if ((u16FullFtw & 3) != 3 /*empty*/)
7880 u8Ftw |= RT_BIT(i);
7881 u16FullFtw >>= 2;
7882 }
7883
7884 return u8Ftw;
7885}
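/*
 * Illustration only - the compressed form keeps one "occupied" bit per
 * register: set whenever the 2-bit tag is anything but 3 (empty), so the
 * valid/zero/special distinction is lost and must be recalculated on reload.
 * Worked example as a hypothetical usage sketch: a full FTW of 0xfffc
 * (register 0 valid, registers 1-7 empty) compresses to 0x01.
 */
#if 0 /* illustrative sketch, not built */
static void iemExampleCompressFtwUsage(void)
{
    uint16_t const u16Full       = UINT16_C(0xfffc);
    uint16_t const u16Compressed = iemFpuCompressFtw(u16Full);
    Assert(u16Compressed == UINT16_C(0x0001));
}
#endif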
7886
7887/** @} */
7888
7889
7890/** @name Memory access.
7891 *
7892 * @{
7893 */
7894
7895
7896/**
7897 * Updates the IEMCPU::cbWritten counter if applicable.
7898 *
7899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7900 * @param fAccess The access being accounted for.
7901 * @param cbMem The access size.
7902 */
7903DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7904{
7905 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7906 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7907 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7908}
7909
7910
7911/**
7912 * Checks if the given segment can be written to, raising the appropriate
7913 * exception if not.
7914 *
7915 * @returns VBox strict status code.
7916 *
7917 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7918 * @param pHid Pointer to the hidden register.
7919 * @param iSegReg The register number.
7920 * @param pu64BaseAddr Where to return the base address to use for the
7921 * segment. (In 64-bit code it may differ from the
7922 * base in the hidden segment.)
7923 */
7924IEM_STATIC VBOXSTRICTRC
7925iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7926{
7927 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7928 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7929 else
7930 {
7931 if (!pHid->Attr.n.u1Present)
7932 {
7933 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7934 AssertRelease(uSel == 0);
7935 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7936 return iemRaiseGeneralProtectionFault0(pVCpu);
7937 }
7938
7939 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7940 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7941 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7942 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7943 *pu64BaseAddr = pHid->u64Base;
7944 }
7945 return VINF_SUCCESS;
7946}
7947
7948
7949/**
7950 * Checks if the given segment can be read from, raising the appropriate
7951 * exception if not.
7952 *
7953 * @returns VBox strict status code.
7954 *
7955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7956 * @param pHid Pointer to the hidden register.
7957 * @param iSegReg The register number.
7958 * @param pu64BaseAddr Where to return the base address to use for the
7959 * segment. (In 64-bit code it may differ from the
7960 * base in the hidden segment.)
7961 */
7962IEM_STATIC VBOXSTRICTRC
7963iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7964{
7965 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7966 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7967 else
7968 {
7969 if (!pHid->Attr.n.u1Present)
7970 {
7971 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7972 AssertRelease(uSel == 0);
7973 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7974 return iemRaiseGeneralProtectionFault0(pVCpu);
7975 }
7976
7977 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7978 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7979 *pu64BaseAddr = pHid->u64Base;
7980 }
7981 return VINF_SUCCESS;
7982}
7983
7984
7985/**
7986 * Applies the segment limit, base and attributes.
7987 *
7988 * This may raise a \#GP or \#SS.
7989 *
7990 * @returns VBox strict status code.
7991 *
7992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7993 * @param fAccess The kind of access which is being performed.
7994 * @param iSegReg The index of the segment register to apply.
7995 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7996 * TSS, ++).
7997 * @param cbMem The access size.
7998 * @param pGCPtrMem Pointer to the guest memory address to apply
7999 * segmentation to. Input and output parameter.
8000 */
8001IEM_STATIC VBOXSTRICTRC
8002iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8003{
8004 if (iSegReg == UINT8_MAX)
8005 return VINF_SUCCESS;
8006
8007 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8008 switch (pVCpu->iem.s.enmCpuMode)
8009 {
8010 case IEMMODE_16BIT:
8011 case IEMMODE_32BIT:
8012 {
8013 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8014 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8015
8016 if ( pSel->Attr.n.u1Present
8017 && !pSel->Attr.n.u1Unusable)
8018 {
8019 Assert(pSel->Attr.n.u1DescType);
8020 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8021 {
8022 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8023 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8024 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8025
8026 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8027 {
8028 /** @todo CPL check. */
8029 }
8030
8031 /*
8032 * There are two kinds of data selectors, normal and expand down.
8033 */
8034 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8035 {
8036 if ( GCPtrFirst32 > pSel->u32Limit
8037 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8038 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8039 }
8040 else
8041 {
8042 /*
8043 * The upper boundary is defined by the B bit, not the G bit!
8044 */
8045 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8046 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8047 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8048 }
8049 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8050 }
8051 else
8052 {
8053
8054 /*
8055 * Code selectors can usually be used to read through; writing is
8056 * only permitted in real and V8086 mode.
8057 */
8058 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8059 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8060 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8061 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8062 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8063
8064 if ( GCPtrFirst32 > pSel->u32Limit
8065 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8066 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8067
8068 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8069 {
8070 /** @todo CPL check. */
8071 }
8072
8073 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8074 }
8075 }
8076 else
8077 return iemRaiseGeneralProtectionFault0(pVCpu);
8078 return VINF_SUCCESS;
8079 }
8080
8081 case IEMMODE_64BIT:
8082 {
8083 RTGCPTR GCPtrMem = *pGCPtrMem;
8084 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8085 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8086
8087 Assert(cbMem >= 1);
8088 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8089 return VINF_SUCCESS;
8090 return iemRaiseGeneralProtectionFault0(pVCpu);
8091 }
8092
8093 default:
8094 AssertFailedReturn(VERR_IEM_IPE_7);
8095 }
8096}
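/*
 * Note (for illustration only): for expand-down data segments the limit is the
 * *lower* bound of valid offsets and the upper bound comes from the D/B bit.
 * E.g. with u32Limit=0x0fff and D/B=1 the range 0x1000 thru 0xffffffff is
 * accessible while anything at or below 0x0fff raises #GP/#SS; with D/B=0 the
 * upper bound shrinks to 0xffff.
 */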
8097
8098
8099/**
8100 * Translates a virtual address to a physical address and checks if we
8101 * can access the page as specified.
8102 *
8103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8104 * @param GCPtrMem The virtual address.
8105 * @param fAccess The intended access.
8106 * @param pGCPhysMem Where to return the physical address.
8107 */
8108IEM_STATIC VBOXSTRICTRC
8109iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8110{
8111 /** @todo Need a different PGM interface here. We're currently using
8112 * generic / REM interfaces. this won't cut it for R0 & RC. */
8113 RTGCPHYS GCPhys;
8114 uint64_t fFlags;
8115 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8116 if (RT_FAILURE(rc))
8117 {
8118 /** @todo Check unassigned memory in unpaged mode. */
8119 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8120 *pGCPhysMem = NIL_RTGCPHYS;
8121 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8122 }
8123
8124 /* If the page is writable, user accessible and does not have the no-exec
8125 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
8126 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8127 {
8128 /* Write to read only memory? */
8129 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8130 && !(fFlags & X86_PTE_RW)
8131 && ( (pVCpu->iem.s.uCpl == 3
8132 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8133 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8134 {
8135 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8136 *pGCPhysMem = NIL_RTGCPHYS;
8137 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8138 }
8139
8140 /* Kernel memory accessed by userland? */
8141 if ( !(fFlags & X86_PTE_US)
8142 && pVCpu->iem.s.uCpl == 3
8143 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8144 {
8145 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8146 *pGCPhysMem = NIL_RTGCPHYS;
8147 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8148 }
8149
8150 /* Executing non-executable memory? */
8151 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8152 && (fFlags & X86_PTE_PAE_NX)
8153 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8154 {
8155 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8156 *pGCPhysMem = NIL_RTGCPHYS;
8157 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8158 VERR_ACCESS_DENIED);
8159 }
8160 }
8161
8162 /*
8163 * Set the dirty / access flags.
8164 * ASSUMES this is set when the address is translated rather than on commit...
8165 */
8166 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8167 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8168 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8169 {
8170 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8171 AssertRC(rc2);
8172 }
8173
8174 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8175 *pGCPhysMem = GCPhys;
8176 return VINF_SUCCESS;
8177}
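/*
 * Note (for illustration only): the read-only check above mirrors the
 * architectural write-protect rule: writes to an R/W=0 page fault for CPL=3
 * data accesses always, and for supervisor accesses only when CR0.WP is set;
 * implicit system accesses (IEM_ACCESS_WHAT_SYS) are exempt from the user-mode
 * part of the test.
 */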
8178
8179
8180
8181/**
8182 * Maps a physical page.
8183 *
8184 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8185 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8186 * @param GCPhysMem The physical address.
8187 * @param fAccess The intended access.
8188 * @param ppvMem Where to return the mapping address.
8189 * @param pLock The PGM lock.
8190 */
8191IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8192{
8193#ifdef IEM_VERIFICATION_MODE_FULL
8194 /* Force the alternative path so we can ignore writes. */
8195 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8196 {
8197 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8198 {
8199 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8200 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8201 if (RT_FAILURE(rc2))
8202 pVCpu->iem.s.fProblematicMemory = true;
8203 }
8204 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8205 }
8206#endif
8207#ifdef IEM_LOG_MEMORY_WRITES
8208 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8209 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8210#endif
8211#ifdef IEM_VERIFICATION_MODE_MINIMAL
8212 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8213#endif
8214
8215 /** @todo This API may require some improvement later. A private deal with PGM
8216 * regarding locking and unlocking needs to be struck. A couple of TLBs
8217 * living in PGM, but with publicly accessible inlined access methods
8218 * could perhaps be an even better solution. */
8219 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8220 GCPhysMem,
8221 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8222 pVCpu->iem.s.fBypassHandlers,
8223 ppvMem,
8224 pLock);
8225 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8226 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8227
8228#ifdef IEM_VERIFICATION_MODE_FULL
8229 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8230 pVCpu->iem.s.fProblematicMemory = true;
8231#endif
8232 return rc;
8233}
8234
8235
8236/**
8237 * Unmaps a page previously mapped by iemMemPageMap.
8238 *
8239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8240 * @param GCPhysMem The physical address.
8241 * @param fAccess The intended access.
8242 * @param pvMem What iemMemPageMap returned.
8243 * @param pLock The PGM lock.
8244 */
8245DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8246{
8247 NOREF(pVCpu);
8248 NOREF(GCPhysMem);
8249 NOREF(fAccess);
8250 NOREF(pvMem);
8251 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8252}
8253
8254
8255/**
8256 * Looks up a memory mapping entry.
8257 *
8258 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8260 * @param pvMem The memory address.
8261 * @param fAccess The access type to look up.
8262 */
8263DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8264{
8265 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8266 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8267 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8268 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8269 return 0;
8270 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8271 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8272 return 1;
8273 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8274 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8275 return 2;
8276 return VERR_NOT_FOUND;
8277}
8278
8279
8280/**
8281 * Finds a free memmap entry when iNextMapping doesn't point to a free one.
8282 *
8283 * @returns Memory mapping index, 1024 on failure.
8284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8285 */
8286IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8287{
8288 /*
8289 * The easy case.
8290 */
8291 if (pVCpu->iem.s.cActiveMappings == 0)
8292 {
8293 pVCpu->iem.s.iNextMapping = 1;
8294 return 0;
8295 }
8296
8297 /* There should be enough mappings for all instructions. */
8298 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8299
8300 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8301 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8302 return i;
8303
8304 AssertFailedReturn(1024);
8305}
8306
8307
8308/**
8309 * Commits a bounce buffer that needs writing back and unmaps it.
8310 *
8311 * @returns Strict VBox status code.
8312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8313 * @param iMemMap The index of the buffer to commit.
8314 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8315 * Always false in ring-3, obviously.
8316 */
8317IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8318{
8319 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8320 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8321#ifdef IN_RING3
8322 Assert(!fPostponeFail);
8323 RT_NOREF_PV(fPostponeFail);
8324#endif
8325
8326 /*
8327 * Do the writing.
8328 */
8329#ifndef IEM_VERIFICATION_MODE_MINIMAL
8330 PVM pVM = pVCpu->CTX_SUFF(pVM);
8331 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8332 && !IEM_VERIFICATION_ENABLED(pVCpu))
8333 {
8334 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8335 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8336 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8337 if (!pVCpu->iem.s.fBypassHandlers)
8338 {
8339 /*
8340 * Carefully and efficiently dealing with access handler return
8341 * codes makes this a little bloated.
8342 */
8343 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8344 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8345 pbBuf,
8346 cbFirst,
8347 PGMACCESSORIGIN_IEM);
8348 if (rcStrict == VINF_SUCCESS)
8349 {
8350 if (cbSecond)
8351 {
8352 rcStrict = PGMPhysWrite(pVM,
8353 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8354 pbBuf + cbFirst,
8355 cbSecond,
8356 PGMACCESSORIGIN_IEM);
8357 if (rcStrict == VINF_SUCCESS)
8358 { /* nothing */ }
8359 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8360 {
8361 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8362 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8363 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8364 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8365 }
8366# ifndef IN_RING3
8367 else if (fPostponeFail)
8368 {
8369 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8370 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8371 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8372 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8373 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8374 return iemSetPassUpStatus(pVCpu, rcStrict);
8375 }
8376# endif
8377 else
8378 {
8379 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8380 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8381 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8382 return rcStrict;
8383 }
8384 }
8385 }
8386 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8387 {
8388 if (!cbSecond)
8389 {
8390 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8392 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8393 }
8394 else
8395 {
8396 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8397 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8398 pbBuf + cbFirst,
8399 cbSecond,
8400 PGMACCESSORIGIN_IEM);
8401 if (rcStrict2 == VINF_SUCCESS)
8402 {
8403 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8404 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8405 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8406 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8407 }
8408 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8409 {
8410 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8412 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8413 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8414 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8415 }
8416# ifndef IN_RING3
8417 else if (fPostponeFail)
8418 {
8419 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8420 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8421 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8422 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8423 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8424 return iemSetPassUpStatus(pVCpu, rcStrict);
8425 }
8426# endif
8427 else
8428 {
8429 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8430 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8431 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8432 return rcStrict2;
8433 }
8434 }
8435 }
8436# ifndef IN_RING3
8437 else if (fPostponeFail)
8438 {
8439 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8440 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8441 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8442 if (!cbSecond)
8443 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8444 else
8445 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8446 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8447 return iemSetPassUpStatus(pVCpu, rcStrict);
8448 }
8449# endif
8450 else
8451 {
8452 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8453 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8454 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8455 return rcStrict;
8456 }
8457 }
8458 else
8459 {
8460 /*
8461 * No access handlers, much simpler.
8462 */
8463 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8464 if (RT_SUCCESS(rc))
8465 {
8466 if (cbSecond)
8467 {
8468 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8469 if (RT_SUCCESS(rc))
8470 { /* likely */ }
8471 else
8472 {
8473 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8474 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8475 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8476 return rc;
8477 }
8478 }
8479 }
8480 else
8481 {
8482 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8483 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8484 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8485 return rc;
8486 }
8487 }
8488 }
8489#endif
8490
8491#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8492 /*
8493 * Record the write(s).
8494 */
8495 if (!pVCpu->iem.s.fNoRem)
8496 {
8497 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8498 if (pEvtRec)
8499 {
8500 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8501 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8502 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8503 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8504 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8505 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8506 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8507 }
8508 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8509 {
8510 pEvtRec = iemVerifyAllocRecord(pVCpu);
8511 if (pEvtRec)
8512 {
8513 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8514 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8515 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8516 memcpy(pEvtRec->u.RamWrite.ab,
8517 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8518 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8519 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8520 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8521 }
8522 }
8523 }
8524#endif
8525#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8526 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8527 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8528 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8529 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8530 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8531 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8532
8533 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8534 g_cbIemWrote = cbWrote;
8535 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8536#endif
8537
8538 /*
8539 * Free the mapping entry.
8540 */
8541 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8542 Assert(pVCpu->iem.s.cActiveMappings != 0);
8543 pVCpu->iem.s.cActiveMappings--;
8544 return VINF_SUCCESS;
8545}
8546
8547
8548/**
8549 * iemMemMap worker that deals with a request crossing pages.
8550 */
8551IEM_STATIC VBOXSTRICTRC
8552iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8553{
8554 /*
8555 * Do the address translations.
8556 */
8557 RTGCPHYS GCPhysFirst;
8558 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8559 if (rcStrict != VINF_SUCCESS)
8560 return rcStrict;
8561
8562 RTGCPHYS GCPhysSecond;
8563 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8564 fAccess, &GCPhysSecond);
8565 if (rcStrict != VINF_SUCCESS)
8566 return rcStrict;
8567 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8568
8569 PVM pVM = pVCpu->CTX_SUFF(pVM);
8570#ifdef IEM_VERIFICATION_MODE_FULL
8571 /*
8572 * Detect problematic memory when verifying so we can select
8573 * the right execution engine. (TLB: Redo this.)
8574 */
8575 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8576 {
8577 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8578 if (RT_SUCCESS(rc2))
8579 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8580 if (RT_FAILURE(rc2))
8581 pVCpu->iem.s.fProblematicMemory = true;
8582 }
8583#endif
8584
8585
8586 /*
8587 * Read in the current memory content if it's a read, execute or partial
8588 * write access.
8589 */
8590 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8591 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8592 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8593
8594 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8595 {
8596 if (!pVCpu->iem.s.fBypassHandlers)
8597 {
8598 /*
8599 * Must carefully deal with access handler status codes here,
8600 * which makes the code a bit bloated.
8601 */
8602 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8603 if (rcStrict == VINF_SUCCESS)
8604 {
8605 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8606 if (rcStrict == VINF_SUCCESS)
8607 { /*likely */ }
8608 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8609 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8610 else
8611 {
8612 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8613 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8614 return rcStrict;
8615 }
8616 }
8617 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8618 {
8619 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8620 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8621 {
8622 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8623 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8624 }
8625 else
8626 {
8627 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8628 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8629 return rcStrict2;
8630 }
8631 }
8632 else
8633 {
8634 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8635 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8636 return rcStrict;
8637 }
8638 }
8639 else
8640 {
8641 /*
8642 * No informational status codes here, much more straightforward.
8643 */
8644 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8645 if (RT_SUCCESS(rc))
8646 {
8647 Assert(rc == VINF_SUCCESS);
8648 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8649 if (RT_SUCCESS(rc))
8650 Assert(rc == VINF_SUCCESS);
8651 else
8652 {
8653 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8654 return rc;
8655 }
8656 }
8657 else
8658 {
8659 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8660 return rc;
8661 }
8662 }
8663
8664#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8665 if ( !pVCpu->iem.s.fNoRem
8666 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8667 {
8668 /*
8669 * Record the reads.
8670 */
8671 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8672 if (pEvtRec)
8673 {
8674 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8675 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8676 pEvtRec->u.RamRead.cb = cbFirstPage;
8677 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8678 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8679 }
8680 pEvtRec = iemVerifyAllocRecord(pVCpu);
8681 if (pEvtRec)
8682 {
8683 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8684 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8685 pEvtRec->u.RamRead.cb = cbSecondPage;
8686 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8687 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8688 }
8689 }
8690#endif
8691 }
8692#ifdef VBOX_STRICT
8693 else
8694 memset(pbBuf, 0xcc, cbMem);
8695 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8696 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8697#endif
8698
8699 /*
8700 * Commit the bounce buffer entry.
8701 */
8702 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8703 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8704 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8705 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8706 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8707 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8708 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8709 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8710 pVCpu->iem.s.cActiveMappings++;
8711
8712 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8713 *ppvMem = pbBuf;
8714 return VINF_SUCCESS;
8715}
8716
8717
8718/**
8719 * iemMemMap worker that deals with iemMemPageMap failures.
8720 */
8721IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8722 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8723{
8724 /*
8725 * Filter out conditions we can handle and the ones which shouldn't happen.
8726 */
8727 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8728 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8729 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8730 {
8731 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8732 return rcMap;
8733 }
8734 pVCpu->iem.s.cPotentialExits++;
8735
8736 /*
8737 * Read in the current memory content if it's a read, execute or partial
8738 * write access.
8739 */
8740 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8741 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8742 {
8743 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8744 memset(pbBuf, 0xff, cbMem);
8745 else
8746 {
8747 int rc;
8748 if (!pVCpu->iem.s.fBypassHandlers)
8749 {
8750 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8751 if (rcStrict == VINF_SUCCESS)
8752 { /* nothing */ }
8753 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8754 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8755 else
8756 {
8757 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8758 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8759 return rcStrict;
8760 }
8761 }
8762 else
8763 {
8764 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8765 if (RT_SUCCESS(rc))
8766 { /* likely */ }
8767 else
8768 {
8769 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
8770 GCPhysFirst, rc));
8771 return rc;
8772 }
8773 }
8774 }
8775
8776#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8777 if ( !pVCpu->iem.s.fNoRem
8778 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8779 {
8780 /*
8781 * Record the read.
8782 */
8783 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8784 if (pEvtRec)
8785 {
8786 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8787 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8788 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8789 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8790 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8791 }
8792 }
8793#endif
8794 }
8795#ifdef VBOX_STRICT
8796 else
8797 memset(pbBuf, 0xcc, cbMem);
8800 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8801 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8802#endif
8803
8804 /*
8805 * Commit the bounce buffer entry.
8806 */
8807 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8808 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8809 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8810 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8811 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8812 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8813 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8814 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8815 pVCpu->iem.s.cActiveMappings++;
8816
8817 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8818 *ppvMem = pbBuf;
8819 return VINF_SUCCESS;
8820}
8821
8822
8823
8824/**
8825 * Maps the specified guest memory for the given kind of access.
8826 *
8827 * This may be using bounce buffering of the memory if it's crossing a page
8828 * boundary or if there is an access handler installed for any of it. Because
8829 * of lock prefix guarantees, we're in for some extra clutter when this
8830 * happens.
8831 *
8832 * This may raise a \#GP, \#SS, \#PF or \#AC.
8833 *
8834 * @returns VBox strict status code.
8835 *
8836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8837 * @param ppvMem Where to return the pointer to the mapped
8838 * memory.
8839 * @param cbMem The number of bytes to map. This is usually 1,
8840 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8841 * string operations it can be up to a page.
8842 * @param iSegReg The index of the segment register to use for
8843 * this access. The base and limits are checked.
8844 * Use UINT8_MAX to indicate that no segmentation
8845 * is required (for IDT, GDT and LDT accesses).
8846 * @param GCPtrMem The address of the guest memory.
8847 * @param fAccess How the memory is being accessed. The
8848 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8849 * how to map the memory, while the
8850 * IEM_ACCESS_WHAT_XXX bit is used when raising
8851 * exceptions.
8852 */
8853IEM_STATIC VBOXSTRICTRC
8854iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8855{
8856 /*
8857 * Check the input and figure out which mapping entry to use.
8858 */
8859 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8860 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8861 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8862
8863 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8864 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8865 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8866 {
8867 iMemMap = iemMemMapFindFree(pVCpu);
8868 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8869 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8870 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8871 pVCpu->iem.s.aMemMappings[2].fAccess),
8872 VERR_IEM_IPE_9);
8873 }
8874
8875 /*
8876 * Map the memory, checking that we can actually access it. If something
8877 * slightly complicated happens, fall back on bounce buffering.
8878 */
8879 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8880 if (rcStrict != VINF_SUCCESS)
8881 return rcStrict;
8882
8883 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8884 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8885
8886 RTGCPHYS GCPhysFirst;
8887 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8888 if (rcStrict != VINF_SUCCESS)
8889 return rcStrict;
8890
8891 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8892 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8893 if (fAccess & IEM_ACCESS_TYPE_READ)
8894 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8895
8896 void *pvMem;
8897 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8898 if (rcStrict != VINF_SUCCESS)
8899 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8900
8901 /*
8902 * Fill in the mapping table entry.
8903 */
8904 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8905 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8906 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8907 pVCpu->iem.s.cActiveMappings++;
8908
8909 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8910 *ppvMem = pvMem;
8911 return VINF_SUCCESS;
8912}
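/*
 * Usage sketch (for illustration only, using DS and a word read as an example):
 * every successful iemMemMap call must be paired with iemMemCommitAndUnmap (or
 * undone via iemMemRollback) so page locks and bounce buffers are released:
 *
 *      uint16_t const *pu16Src;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src),
 *                                        X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_R);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint16_t const u16Value = *pu16Src;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
 *      }
 */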
8913
8914
8915/**
8916 * Commits the guest memory if bounce buffered and unmaps it.
8917 *
8918 * @returns Strict VBox status code.
8919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8920 * @param pvMem The mapping.
8921 * @param fAccess The kind of access.
8922 */
8923IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8924{
8925 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8926 AssertReturn(iMemMap >= 0, iMemMap);
8927
8928 /* If it's bounce buffered, we may need to write back the buffer. */
8929 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8930 {
8931 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8932 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8933 }
8934 /* Otherwise unlock it. */
8935 else
8936 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8937
8938 /* Free the entry. */
8939 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8940 Assert(pVCpu->iem.s.cActiveMappings != 0);
8941 pVCpu->iem.s.cActiveMappings--;
8942 return VINF_SUCCESS;
8943}
8944
8945#ifdef IEM_WITH_SETJMP
8946
8947/**
8948 * Maps the specified guest memory for the given kind of access, longjmp on
8949 * error.
8950 *
8951 * This may be using bounce buffering of the memory if it's crossing a page
8952 * boundary or if there is an access handler installed for any of it. Because
8953 * of lock prefix guarantees, we're in for some extra clutter when this
8954 * happens.
8955 *
8956 * This may raise a \#GP, \#SS, \#PF or \#AC.
8957 *
8958 * @returns Pointer to the mapped memory.
8959 *
8960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8961 * @param cbMem The number of bytes to map. This is usually 1,
8962 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8963 * string operations it can be up to a page.
8964 * @param iSegReg The index of the segment register to use for
8965 * this access. The base and limits are checked.
8966 * Use UINT8_MAX to indicate that no segmentation
8967 * is required (for IDT, GDT and LDT accesses).
8968 * @param GCPtrMem The address of the guest memory.
8969 * @param fAccess How the memory is being accessed. The
8970 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8971 * how to map the memory, while the
8972 * IEM_ACCESS_WHAT_XXX bit is used when raising
8973 * exceptions.
8974 */
8975IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8976{
8977 /*
8978 * Check the input and figure out which mapping entry to use.
8979 */
8980 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8981 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8982 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8983
8984 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8985 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8986 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8987 {
8988 iMemMap = iemMemMapFindFree(pVCpu);
8989 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8990 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8991 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8992 pVCpu->iem.s.aMemMappings[2].fAccess),
8993 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8994 }
8995
8996 /*
8997 * Map the memory, checking that we can actually access it. If something
8998 * slightly complicated happens, fall back on bounce buffering.
8999 */
9000 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9001 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9002 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9003
9004 /* Crossing a page boundary? */
9005 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9006 { /* No (likely). */ }
9007 else
9008 {
9009 void *pvMem;
9010 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9011 if (rcStrict == VINF_SUCCESS)
9012 return pvMem;
9013 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9014 }
9015
9016 RTGCPHYS GCPhysFirst;
9017 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9018 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9019 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9020
9021 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9022 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9023 if (fAccess & IEM_ACCESS_TYPE_READ)
9024 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9025
9026 void *pvMem;
9027 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9028 if (rcStrict == VINF_SUCCESS)
9029 { /* likely */ }
9030 else
9031 {
9032 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9033 if (rcStrict == VINF_SUCCESS)
9034 return pvMem;
9035 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9036 }
9037
9038 /*
9039 * Fill in the mapping table entry.
9040 */
9041 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9042 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9043 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9044 pVCpu->iem.s.cActiveMappings++;
9045
9046 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9047 return pvMem;
9048}
9049
9050
9051/**
9052 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9053 *
9054 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9055 * @param pvMem The mapping.
9056 * @param fAccess The kind of access.
9057 */
9058IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9059{
9060 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9061 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9062
9063 /* If it's bounce buffered, we may need to write back the buffer. */
9064 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9065 {
9066 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9067 {
9068 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9069 if (rcStrict == VINF_SUCCESS)
9070 return;
9071 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9072 }
9073 }
9074 /* Otherwise unlock it. */
9075 else
9076 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9077
9078 /* Free the entry. */
9079 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9080 Assert(pVCpu->iem.s.cActiveMappings != 0);
9081 pVCpu->iem.s.cActiveMappings--;
9082}
9083
9084#endif
9085
9086#ifndef IN_RING3
9087/**
9088 * Commits the guest memory if bounce buffered and unmaps it. If any bounce
9089 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and such).
9090 *
9091 * Allows the instruction to be completed and retired, while the IEM user will
9092 * return to ring-3 immediately afterwards and do the postponed writes there.
9093 *
9094 * @returns VBox status code (no strict statuses). Caller must check
9095 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9097 * @param pvMem The mapping.
9098 * @param fAccess The kind of access.
9099 */
9100IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9101{
9102 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9103 AssertReturn(iMemMap >= 0, iMemMap);
9104
9105 /* If it's bounce buffered, we may need to write back the buffer. */
9106 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9107 {
9108 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9109 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9110 }
9111 /* Otherwise unlock it. */
9112 else
9113 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9114
9115 /* Free the entry. */
9116 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9117 Assert(pVCpu->iem.s.cActiveMappings != 0);
9118 pVCpu->iem.s.cActiveMappings--;
9119 return VINF_SUCCESS;
9120}
9121#endif
9122
9123
9124/**
9125 * Rolls back mappings, releasing page locks and such.
9126 *
9127 * The caller shall only call this after checking cActiveMappings.
9128 *
9130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9131 */
9132IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9133{
9134 Assert(pVCpu->iem.s.cActiveMappings > 0);
9135
9136 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9137 while (iMemMap-- > 0)
9138 {
9139 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9140 if (fAccess != IEM_ACCESS_INVALID)
9141 {
9142 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9143 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9144 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9145 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9146 Assert(pVCpu->iem.s.cActiveMappings > 0);
9147 pVCpu->iem.s.cActiveMappings--;
9148 }
9149 }
9150}
9151
9152
9153/**
9154 * Fetches a data byte.
9155 *
9156 * @returns Strict VBox status code.
9157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9158 * @param pu8Dst Where to return the byte.
9159 * @param iSegReg The index of the segment register to use for
9160 * this access. The base and limits are checked.
9161 * @param GCPtrMem The address of the guest memory.
9162 */
9163IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9164{
9165 /* The lazy approach for now... */
9166 uint8_t const *pu8Src;
9167 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9168 if (rc == VINF_SUCCESS)
9169 {
9170 *pu8Dst = *pu8Src;
9171 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9172 }
9173 return rc;
9174}
9175
9176
9177#ifdef IEM_WITH_SETJMP
9178/**
9179 * Fetches a data byte, longjmp on error.
9180 *
9181 * @returns The byte.
9182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9183 * @param iSegReg The index of the segment register to use for
9184 * this access. The base and limits are checked.
9185 * @param GCPtrMem The address of the guest memory.
9186 */
9187DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9188{
9189 /* The lazy approach for now... */
9190 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9191 uint8_t const bRet = *pu8Src;
9192 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9193 return bRet;
9194}
9195#endif /* IEM_WITH_SETJMP */
9196
9197
9198/**
9199 * Fetches a data word.
9200 *
9201 * @returns Strict VBox status code.
9202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9203 * @param pu16Dst Where to return the word.
9204 * @param iSegReg The index of the segment register to use for
9205 * this access. The base and limits are checked.
9206 * @param GCPtrMem The address of the guest memory.
9207 */
9208IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9209{
9210 /* The lazy approach for now... */
9211 uint16_t const *pu16Src;
9212 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9213 if (rc == VINF_SUCCESS)
9214 {
9215 *pu16Dst = *pu16Src;
9216 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9217 }
9218 return rc;
9219}
9220
9221
9222#ifdef IEM_WITH_SETJMP
9223/**
9224 * Fetches a data word, longjmp on error.
9225 *
9226 * @returns The word.
9227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9228 * @param iSegReg The index of the segment register to use for
9229 * this access. The base and limits are checked.
9230 * @param GCPtrMem The address of the guest memory.
9231 */
9232DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9233{
9234 /* The lazy approach for now... */
9235 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9236 uint16_t const u16Ret = *pu16Src;
9237 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9238 return u16Ret;
9239}
9240#endif
9241
9242
9243/**
9244 * Fetches a data dword.
9245 *
9246 * @returns Strict VBox status code.
9247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9248 * @param pu32Dst Where to return the dword.
9249 * @param iSegReg The index of the segment register to use for
9250 * this access. The base and limits are checked.
9251 * @param GCPtrMem The address of the guest memory.
9252 */
9253IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9254{
9255 /* The lazy approach for now... */
9256 uint32_t const *pu32Src;
9257 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9258 if (rc == VINF_SUCCESS)
9259 {
9260 *pu32Dst = *pu32Src;
9261 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9262 }
9263 return rc;
9264}
9265
9266
9267#ifdef IEM_WITH_SETJMP
9268
9269IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9270{
9271 Assert(cbMem >= 1);
9272 Assert(iSegReg < X86_SREG_COUNT);
9273
9274 /*
9275 * 64-bit mode is simpler.
9276 */
9277 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9278 {
9279 if (iSegReg >= X86_SREG_FS)
9280 {
9281 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9282 GCPtrMem += pSel->u64Base;
9283 }
9284
9285 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9286 return GCPtrMem;
9287 }
9288 /*
9289 * 16-bit and 32-bit segmentation.
9290 */
9291 else
9292 {
9293 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9294 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9295 == X86DESCATTR_P /* data, expand up */
9296 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9297 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9298 {
9299 /* expand up */
9300 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9301 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9302 && GCPtrLast32 > (uint32_t)GCPtrMem))
9303 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9304 }
9305 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9306 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9307 {
9308 /* expand down */
9309 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9310 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9311 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9312 && GCPtrLast32 > (uint32_t)GCPtrMem))
9313 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9314 }
9315 else
9316 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9317 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9318 }
9319 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9320}
9321
9322
9323IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9324{
9325 Assert(cbMem >= 1);
9326 Assert(iSegReg < X86_SREG_COUNT);
9327
9328 /*
9329 * 64-bit mode is simpler.
9330 */
9331 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9332 {
9333 if (iSegReg >= X86_SREG_FS)
9334 {
9335 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9336 GCPtrMem += pSel->u64Base;
9337 }
9338
9339 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9340 return GCPtrMem;
9341 }
9342 /*
9343 * 16-bit and 32-bit segmentation.
9344 */
9345 else
9346 {
9347 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9348 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9349 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9350 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9351 {
9352 /* expand up */
9353 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9354 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9355 && GCPtrLast32 > (uint32_t)GCPtrMem))
9356 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9357 }
9358 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9359 {
9360 /* expand down */
9361 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9362 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9363 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9364 && GCPtrLast32 > (uint32_t)GCPtrMem))
9365 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9366 }
9367 else
9368 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9369 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9370 }
9371 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9372}
9373
9374
9375/**
9376 * Fetches a data dword, longjmp on error, fallback/safe version.
9377 *
9378 * @returns The dword.
9379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9380 * @param iSegReg The index of the segment register to use for
9381 * this access. The base and limits are checked.
9382 * @param GCPtrMem The address of the guest memory.
9383 */
9384IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9385{
9386 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9387 uint32_t const u32Ret = *pu32Src;
9388 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9389 return u32Ret;
9390}
9391
9392
9393/**
9394 * Fetches a data dword, longjmp on error.
9395 *
9396 * @returns The dword.
9397 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9398 * @param iSegReg The index of the segment register to use for
9399 * this access. The base and limits are checked.
9400 * @param GCPtrMem The address of the guest memory.
9401 */
9402DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9403{
9404# ifdef IEM_WITH_DATA_TLB
9405 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9406 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9407 {
9408 /// @todo more later.
9409 }
9410
9411 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9412# else
9413 /* The lazy approach. */
9414 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9415 uint32_t const u32Ret = *pu32Src;
9416 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9417 return u32Ret;
9418# endif
9419}
9420#endif
9421
9422
9423#ifdef SOME_UNUSED_FUNCTION
9424/**
9425 * Fetches a data dword and sign extends it to a qword.
9426 *
9427 * @returns Strict VBox status code.
9428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9429 * @param pu64Dst Where to return the sign extended value.
9430 * @param iSegReg The index of the segment register to use for
9431 * this access. The base and limits are checked.
9432 * @param GCPtrMem The address of the guest memory.
9433 */
9434IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9435{
9436 /* The lazy approach for now... */
9437 int32_t const *pi32Src;
9438 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9439 if (rc == VINF_SUCCESS)
9440 {
9441 *pu64Dst = *pi32Src;
9442 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9443 }
9444#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9445 else
9446 *pu64Dst = 0;
9447#endif
9448 return rc;
9449}
9450#endif
9451
9452
9453/**
9454 * Fetches a data qword.
9455 *
9456 * @returns Strict VBox status code.
9457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9458 * @param pu64Dst Where to return the qword.
9459 * @param iSegReg The index of the segment register to use for
9460 * this access. The base and limits are checked.
9461 * @param GCPtrMem The address of the guest memory.
9462 */
9463IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9464{
9465 /* The lazy approach for now... */
9466 uint64_t const *pu64Src;
9467 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9468 if (rc == VINF_SUCCESS)
9469 {
9470 *pu64Dst = *pu64Src;
9471 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9472 }
9473 return rc;
9474}
9475
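/*
 * A minimal usage sketch (illustration only; the handler name is made up):
 * callers of the iemMemFetchDataUxx helpers fetch into a local variable,
 * propagate any non-VINF_SUCCESS strict status code unchanged, and only
 * then act on the value.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleFetchQword(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
{
    uint64_t     u64Value;
    VBOXSTRICTRC rcStrict = iemMemFetchDataU64(pVCpu, &u64Value, iEffSeg, GCPtrEff);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;    /* #PF, #GP(0), #SS(0) or an informational status - pass it up. */
    /* ... operate on u64Value here ... */
    return VINF_SUCCESS;
}
#endif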
9476
9477#ifdef IEM_WITH_SETJMP
9478/**
9479 * Fetches a data qword, longjmp on error.
9480 *
9481 * @returns The qword.
9482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9483 * @param iSegReg The index of the segment register to use for
9484 * this access. The base and limits are checked.
9485 * @param GCPtrMem The address of the guest memory.
9486 */
9487DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9488{
9489 /* The lazy approach for now... */
9490 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9491 uint64_t const u64Ret = *pu64Src;
9492 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9493 return u64Ret;
9494}
9495#endif
9496
9497
9498/**
9499 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9500 *
9501 * @returns Strict VBox status code.
9502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9503 * @param pu64Dst Where to return the qword.
9504 * @param iSegReg The index of the segment register to use for
9505 * this access. The base and limits are checked.
9506 * @param GCPtrMem The address of the guest memory.
9507 */
9508IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9509{
9510 /* The lazy approach for now... */
9511 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9512 if (RT_UNLIKELY(GCPtrMem & 15))
9513 return iemRaiseGeneralProtectionFault0(pVCpu);
9514
9515 uint64_t const *pu64Src;
9516 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9517 if (rc == VINF_SUCCESS)
9518 {
9519 *pu64Dst = *pu64Src;
9520 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9521 }
9522 return rc;
9523}
9524
9525
9526#ifdef IEM_WITH_SETJMP
9527/**
9528 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9529 *
9530 * @returns The qword.
9531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9532 * @param iSegReg The index of the segment register to use for
9533 * this access. The base and limits are checked.
9534 * @param GCPtrMem The address of the guest memory.
9535 */
9536DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9537{
9538 /* The lazy approach for now... */
9539 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9540 if (RT_LIKELY(!(GCPtrMem & 15)))
9541 {
9542 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9543 uint64_t const u64Ret = *pu64Src;
9544 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9545 return u64Ret;
9546 }
9547
9548 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9549 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9550}
9551#endif
9552
9553
9554/**
9555 * Fetches a data tword.
9556 *
9557 * @returns Strict VBox status code.
9558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9559 * @param pr80Dst Where to return the tword.
9560 * @param iSegReg The index of the segment register to use for
9561 * this access. The base and limits are checked.
9562 * @param GCPtrMem The address of the guest memory.
9563 */
9564IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9565{
9566 /* The lazy approach for now... */
9567 PCRTFLOAT80U pr80Src;
9568 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9569 if (rc == VINF_SUCCESS)
9570 {
9571 *pr80Dst = *pr80Src;
9572 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9573 }
9574 return rc;
9575}
9576
9577
9578#ifdef IEM_WITH_SETJMP
9579/**
9580 * Fetches a data tword, longjmp on error.
9581 *
9582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9583 * @param pr80Dst Where to return the tword.
9584 * @param iSegReg The index of the segment register to use for
9585 * this access. The base and limits are checked.
9586 * @param GCPtrMem The address of the guest memory.
9587 */
9588DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9589{
9590 /* The lazy approach for now... */
9591 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9592 *pr80Dst = *pr80Src;
9593 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9594}
9595#endif
9596
9597
9598/**
9599 * Fetches a data dqword (double qword), generally SSE related.
9600 *
9601 * @returns Strict VBox status code.
9602 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9603 * @param pu128Dst Where to return the dqword.
9604 * @param iSegReg The index of the segment register to use for
9605 * this access. The base and limits are checked.
9606 * @param GCPtrMem The address of the guest memory.
9607 */
9608IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9609{
9610 /* The lazy approach for now... */
9611 PCRTUINT128U pu128Src;
9612 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9613 if (rc == VINF_SUCCESS)
9614 {
9615 pu128Dst->au64[0] = pu128Src->au64[0];
9616 pu128Dst->au64[1] = pu128Src->au64[1];
9617 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9618 }
9619 return rc;
9620}
9621
9622
9623#ifdef IEM_WITH_SETJMP
9624/**
9625 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9626 *
9627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9628 * @param pu128Dst Where to return the dqword.
9629 * @param iSegReg The index of the segment register to use for
9630 * this access. The base and limits are checked.
9631 * @param GCPtrMem The address of the guest memory.
9632 */
9633IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9634{
9635 /* The lazy approach for now... */
9636 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9637 pu128Dst->au64[0] = pu128Src->au64[0];
9638 pu128Dst->au64[1] = pu128Src->au64[1];
9639 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9640}
9641#endif
9642
9643
9644/**
9645 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9646 * related.
9647 *
9648 * Raises \#GP(0) if not aligned.
9649 *
9650 * @returns Strict VBox status code.
9651 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9652 * @param pu128Dst Where to return the dqword.
9653 * @param iSegReg The index of the segment register to use for
9654 * this access. The base and limits are checked.
9655 * @param GCPtrMem The address of the guest memory.
9656 */
9657IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9658{
9659 /* The lazy approach for now... */
9660 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9661 if ( (GCPtrMem & 15)
9662 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9663 return iemRaiseGeneralProtectionFault0(pVCpu);
9664
9665 PCRTUINT128U pu128Src;
9666 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9667 if (rc == VINF_SUCCESS)
9668 {
9669 pu128Dst->au64[0] = pu128Src->au64[0];
9670 pu128Dst->au64[1] = pu128Src->au64[1];
9671 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9672 }
9673 return rc;
9674}
9675
9676
9677#ifdef IEM_WITH_SETJMP
9678/**
9679 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9680 * related, longjmp on error.
9681 *
9682 * Raises \#GP(0) if not aligned.
9683 *
9684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9685 * @param pu128Dst Where to return the dqword.
9686 * @param iSegReg The index of the segment register to use for
9687 * this access. The base and limits are checked.
9688 * @param GCPtrMem The address of the guest memory.
9689 */
9690DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9691{
9692 /* The lazy approach for now... */
9693 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9694 if ( (GCPtrMem & 15) == 0
9695 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9696 {
9697 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9698 pu128Dst->au64[0] = pu128Src->au64[0];
9699 pu128Dst->au64[1] = pu128Src->au64[1];
9700 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9701 return;
9702 }
9703
9704 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9705 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9706}
9707#endif
9708
9709
9710/**
9711 * Fetches a data oword (octo word), generally AVX related.
9712 *
9713 * @returns Strict VBox status code.
9714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9715 * @param pu256Dst Where to return the oword.
9716 * @param iSegReg The index of the segment register to use for
9717 * this access. The base and limits are checked.
9718 * @param GCPtrMem The address of the guest memory.
9719 */
9720IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9721{
9722 /* The lazy approach for now... */
9723 PCRTUINT256U pu256Src;
9724 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9725 if (rc == VINF_SUCCESS)
9726 {
9727 pu256Dst->au64[0] = pu256Src->au64[0];
9728 pu256Dst->au64[1] = pu256Src->au64[1];
9729 pu256Dst->au64[2] = pu256Src->au64[2];
9730 pu256Dst->au64[3] = pu256Src->au64[3];
9731 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9732 }
9733 return rc;
9734}
9735
9736
9737#ifdef IEM_WITH_SETJMP
9738/**
9739 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9740 *
9741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9742 * @param pu256Dst Where to return the oword.
9743 * @param iSegReg The index of the segment register to use for
9744 * this access. The base and limits are checked.
9745 * @param GCPtrMem The address of the guest memory.
9746 */
9747IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9748{
9749 /* The lazy approach for now... */
9750 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9751 pu256Dst->au64[0] = pu256Src->au64[0];
9752 pu256Dst->au64[1] = pu256Src->au64[1];
9753 pu256Dst->au64[2] = pu256Src->au64[2];
9754 pu256Dst->au64[3] = pu256Src->au64[3];
9755 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9756}
9757#endif
9758
9759
9760/**
9761 * Fetches a data oword (octo word) at an aligned address, generally AVX
9762 * related.
9763 *
9764 * Raises \#GP(0) if not aligned.
9765 *
9766 * @returns Strict VBox status code.
9767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9768 * @param pu256Dst Where to return the oword.
9769 * @param iSegReg The index of the segment register to use for
9770 * this access. The base and limits are checked.
9771 * @param GCPtrMem The address of the guest memory.
9772 */
9773IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9774{
9775 /* The lazy approach for now... */
9776 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9777 if (GCPtrMem & 31)
9778 return iemRaiseGeneralProtectionFault0(pVCpu);
9779
9780 PCRTUINT256U pu256Src;
9781 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9782 if (rc == VINF_SUCCESS)
9783 {
9784 pu256Dst->au64[0] = pu256Src->au64[0];
9785 pu256Dst->au64[1] = pu256Src->au64[1];
9786 pu256Dst->au64[2] = pu256Src->au64[2];
9787 pu256Dst->au64[3] = pu256Src->au64[3];
9788 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9789 }
9790 return rc;
9791}
9792
9793
9794#ifdef IEM_WITH_SETJMP
9795/**
9796 * Fetches a data oword (octo word) at an aligned address, generally AVX
9797 * related, longjmp on error.
9798 *
9799 * Raises \#GP(0) if not aligned.
9800 *
9801 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9802 * @param pu256Dst Where to return the oword.
9803 * @param iSegReg The index of the segment register to use for
9804 * this access. The base and limits are checked.
9805 * @param GCPtrMem The address of the guest memory.
9806 */
9807DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9808{
9809 /* The lazy approach for now... */
9810 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9811 if ((GCPtrMem & 31) == 0)
9812 {
9813 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9814 pu256Dst->au64[0] = pu256Src->au64[0];
9815 pu256Dst->au64[1] = pu256Src->au64[1];
9816 pu256Dst->au64[2] = pu256Src->au64[2];
9817 pu256Dst->au64[3] = pu256Src->au64[3];
9818 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9819 return;
9820 }
9821
9822 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9823 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9824}
9825#endif
9826
9827
9828
9829/**
9830 * Fetches a descriptor register (lgdt, lidt).
9831 *
9832 * @returns Strict VBox status code.
9833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9834 * @param pcbLimit Where to return the limit.
9835 * @param pGCPtrBase Where to return the base.
9836 * @param iSegReg The index of the segment register to use for
9837 * this access. The base and limits are checked.
9838 * @param GCPtrMem The address of the guest memory.
9839 * @param enmOpSize The effective operand size.
9840 */
9841IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9842 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9843{
9844 /*
9845 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9846 * little special:
9847 * - The two reads are done separately.
9848 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9849 * - We suspect the 386 to actually commit the limit before the base in
9850 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9851 * don't try to emulate this eccentric behavior, because it's not well
9852 * enough understood and rather hard to trigger.
9853 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9854 */
9855 VBOXSTRICTRC rcStrict;
9856 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9857 {
9858 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9859 if (rcStrict == VINF_SUCCESS)
9860 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9861 }
9862 else
9863 {
9864 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
9865 if (enmOpSize == IEMMODE_32BIT)
9866 {
9867 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9868 {
9869 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9870 if (rcStrict == VINF_SUCCESS)
9871 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9872 }
9873 else
9874 {
9875 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9876 if (rcStrict == VINF_SUCCESS)
9877 {
9878 *pcbLimit = (uint16_t)uTmp;
9879 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9880 }
9881 }
9882 if (rcStrict == VINF_SUCCESS)
9883 *pGCPtrBase = uTmp;
9884 }
9885 else
9886 {
9887 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9888 if (rcStrict == VINF_SUCCESS)
9889 {
9890 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9891 if (rcStrict == VINF_SUCCESS)
9892 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9893 }
9894 }
9895 }
9896 return rcStrict;
9897}
9898
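/*
 * Sketch of the intended calling pattern for iemMemFetchDataXdtr (for
 * illustration only; the handler name is hypothetical and the final commit
 * into the guest GDTR/IDTR is left as a comment): fetch limit and base in
 * one call and commit them only on success.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleLoadXdtr(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff, IEMMODE enmEffOpSize)
{
    uint16_t cbLimit;
    RTGCPTR  GCPtrBase;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEff, enmEffOpSize);
    if (rcStrict == VINF_SUCCESS)
    {
        /* Commit cbLimit/GCPtrBase to the guest descriptor table register here
           and advance RIP; both steps are omitted from this sketch. */
    }
    return rcStrict;
}
#endif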
9899
9900
9901/**
9902 * Stores a data byte.
9903 *
9904 * @returns Strict VBox status code.
9905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9906 * @param iSegReg The index of the segment register to use for
9907 * this access. The base and limits are checked.
9908 * @param GCPtrMem The address of the guest memory.
9909 * @param u8Value The value to store.
9910 */
9911IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9912{
9913 /* The lazy approach for now... */
9914 uint8_t *pu8Dst;
9915 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9916 if (rc == VINF_SUCCESS)
9917 {
9918 *pu8Dst = u8Value;
9919 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9920 }
9921 return rc;
9922}
9923
9924
9925#ifdef IEM_WITH_SETJMP
9926/**
9927 * Stores a data byte, longjmp on error.
9928 *
9929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9930 * @param iSegReg The index of the segment register to use for
9931 * this access. The base and limits are checked.
9932 * @param GCPtrMem The address of the guest memory.
9933 * @param u8Value The value to store.
9934 */
9935IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9936{
9937 /* The lazy approach for now... */
9938 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9939 *pu8Dst = u8Value;
9940 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9941}
9942#endif
9943
9944
9945/**
9946 * Stores a data word.
9947 *
9948 * @returns Strict VBox status code.
9949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9950 * @param iSegReg The index of the segment register to use for
9951 * this access. The base and limits are checked.
9952 * @param GCPtrMem The address of the guest memory.
9953 * @param u16Value The value to store.
9954 */
9955IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9956{
9957 /* The lazy approach for now... */
9958 uint16_t *pu16Dst;
9959 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9960 if (rc == VINF_SUCCESS)
9961 {
9962 *pu16Dst = u16Value;
9963 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9964 }
9965 return rc;
9966}
9967
9968
9969#ifdef IEM_WITH_SETJMP
9970/**
9971 * Stores a data word, longjmp on error.
9972 *
9973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9974 * @param iSegReg The index of the segment register to use for
9975 * this access. The base and limits are checked.
9976 * @param GCPtrMem The address of the guest memory.
9977 * @param u16Value The value to store.
9978 */
9979IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9980{
9981 /* The lazy approach for now... */
9982 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9983 *pu16Dst = u16Value;
9984 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9985}
9986#endif
9987
9988
9989/**
9990 * Stores a data dword.
9991 *
9992 * @returns Strict VBox status code.
9993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9994 * @param iSegReg The index of the segment register to use for
9995 * this access. The base and limits are checked.
9996 * @param GCPtrMem The address of the guest memory.
9997 * @param u32Value The value to store.
9998 */
9999IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10000{
10001 /* The lazy approach for now... */
10002 uint32_t *pu32Dst;
10003 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10004 if (rc == VINF_SUCCESS)
10005 {
10006 *pu32Dst = u32Value;
10007 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10008 }
10009 return rc;
10010}
10011
10012
10013#ifdef IEM_WITH_SETJMP
10014/**
10015 * Stores a data dword, longjmp on error.
10016 *
10017 * @returns Strict VBox status code.
10018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10019 * @param iSegReg The index of the segment register to use for
10020 * this access. The base and limits are checked.
10021 * @param GCPtrMem The address of the guest memory.
10022 * @param u32Value The value to store.
10023 */
10024IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10025{
10026 /* The lazy approach for now... */
10027 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10028 *pu32Dst = u32Value;
10029 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10030}
10031#endif
10032
10033
10034/**
10035 * Stores a data qword.
10036 *
10037 * @returns Strict VBox status code.
10038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10039 * @param iSegReg The index of the segment register to use for
10040 * this access. The base and limits are checked.
10041 * @param GCPtrMem The address of the guest memory.
10042 * @param u64Value The value to store.
10043 */
10044IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10045{
10046 /* The lazy approach for now... */
10047 uint64_t *pu64Dst;
10048 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10049 if (rc == VINF_SUCCESS)
10050 {
10051 *pu64Dst = u64Value;
10052 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10053 }
10054 return rc;
10055}
10056
10057
10058#ifdef IEM_WITH_SETJMP
10059/**
10060 * Stores a data qword, longjmp on error.
10061 *
10062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10063 * @param iSegReg The index of the segment register to use for
10064 * this access. The base and limits are checked.
10065 * @param GCPtrMem The address of the guest memory.
10066 * @param u64Value The value to store.
10067 */
10068IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10069{
10070 /* The lazy approach for now... */
10071 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10072 *pu64Dst = u64Value;
10073 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10074}
10075#endif
10076
10077
10078/**
10079 * Stores a data dqword.
10080 *
10081 * @returns Strict VBox status code.
10082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10083 * @param iSegReg The index of the segment register to use for
10084 * this access. The base and limits are checked.
10085 * @param GCPtrMem The address of the guest memory.
10086 * @param u128Value The value to store.
10087 */
10088IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10089{
10090 /* The lazy approach for now... */
10091 PRTUINT128U pu128Dst;
10092 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10093 if (rc == VINF_SUCCESS)
10094 {
10095 pu128Dst->au64[0] = u128Value.au64[0];
10096 pu128Dst->au64[1] = u128Value.au64[1];
10097 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10098 }
10099 return rc;
10100}
10101
10102
10103#ifdef IEM_WITH_SETJMP
10104/**
10105 * Stores a data dqword, longjmp on error.
10106 *
10107 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10108 * @param iSegReg The index of the segment register to use for
10109 * this access. The base and limits are checked.
10110 * @param GCPtrMem The address of the guest memory.
10111 * @param u128Value The value to store.
10112 */
10113IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10114{
10115 /* The lazy approach for now... */
10116 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10117 pu128Dst->au64[0] = u128Value.au64[0];
10118 pu128Dst->au64[1] = u128Value.au64[1];
10119 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10120}
10121#endif
10122
10123
10124/**
10125 * Stores a data dqword, SSE aligned.
10126 *
10127 * @returns Strict VBox status code.
10128 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10129 * @param iSegReg The index of the segment register to use for
10130 * this access. The base and limits are checked.
10131 * @param GCPtrMem The address of the guest memory.
10132 * @param u128Value The value to store.
10133 */
10134IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10135{
10136 /* The lazy approach for now... */
10137 if ( (GCPtrMem & 15)
10138 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10139 return iemRaiseGeneralProtectionFault0(pVCpu);
10140
10141 PRTUINT128U pu128Dst;
10142 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10143 if (rc == VINF_SUCCESS)
10144 {
10145 pu128Dst->au64[0] = u128Value.au64[0];
10146 pu128Dst->au64[1] = u128Value.au64[1];
10147 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10148 }
10149 return rc;
10150}
10151
10152
10153#ifdef IEM_WITH_SETJMP
10154/**
10155 * Stores a data dqword, SSE aligned, longjmp on error.
10156 *
10157 * @returns Strict VBox status code.
10158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10159 * @param iSegReg The index of the segment register to use for
10160 * this access. The base and limits are checked.
10161 * @param GCPtrMem The address of the guest memory.
10162 * @param u128Value The value to store.
10163 */
10164DECL_NO_INLINE(IEM_STATIC, void)
10165iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10166{
10167 /* The lazy approach for now... */
10168 if ( (GCPtrMem & 15) == 0
10169 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10170 {
10171 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10172 pu128Dst->au64[0] = u128Value.au64[0];
10173 pu128Dst->au64[1] = u128Value.au64[1];
10174 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10175 return;
10176 }
10177
10178 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10179 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10180}
10181#endif
10182
10183
10184/**
10185 * Stores a descriptor register (sgdt, sidt).
10186 *
10187 * @returns Strict VBox status code.
10188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10189 * @param cbLimit The limit.
10190 * @param GCPtrBase The base address.
10191 * @param iSegReg The index of the segment register to use for
10192 * this access. The base and limits are checked.
10193 * @param GCPtrMem The address of the guest memory.
10194 */
10195IEM_STATIC VBOXSTRICTRC
10196iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10197{
10198 VBOXSTRICTRC rcStrict;
10199 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
10200 {
10201 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
10202 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
10203 }
10204
10205 /*
10206     * The SIDT and SGDT instructions actually store the data using two
10207     * independent writes. The instructions do not respond to opsize prefixes.
10208 */
10209 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10210 if (rcStrict == VINF_SUCCESS)
10211 {
10212 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10213 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10214 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10215 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10216 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10217 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10218 else
10219 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10220 }
10221 return rcStrict;
10222}
10223
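/*
 * Companion sketch (illustration only, hypothetical handler name): an
 * SGDT-style caller simply feeds the cached limit and base from the guest
 * context into iemMemStoreDataXdtr and lets it perform the two writes.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleStoreGdtr(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
{
    PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
    return iemMemStoreDataXdtr(pVCpu, pCtx->gdtr.cbGdt, pCtx->gdtr.pGdt, iEffSeg, GCPtrEff);
}
#endif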
10224
10225/**
10226 * Pushes a word onto the stack.
10227 *
10228 * @returns Strict VBox status code.
10229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10230 * @param u16Value The value to push.
10231 */
10232IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10233{
10234    /* Decrement the stack pointer. */
10235 uint64_t uNewRsp;
10236 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10237 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10238
10239 /* Write the word the lazy way. */
10240 uint16_t *pu16Dst;
10241 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10242 if (rc == VINF_SUCCESS)
10243 {
10244 *pu16Dst = u16Value;
10245 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10246 }
10247
10248    /* Commit the new RSP value unless an access handler made trouble. */
10249 if (rc == VINF_SUCCESS)
10250 pCtx->rsp = uNewRsp;
10251
10252 return rc;
10253}
10254
10255
10256/**
10257 * Pushes a dword onto the stack.
10258 *
10259 * @returns Strict VBox status code.
10260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10261 * @param u32Value The value to push.
10262 */
10263IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10264{
10265    /* Decrement the stack pointer. */
10266 uint64_t uNewRsp;
10267 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10268 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10269
10270 /* Write the dword the lazy way. */
10271 uint32_t *pu32Dst;
10272 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10273 if (rc == VINF_SUCCESS)
10274 {
10275 *pu32Dst = u32Value;
10276 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10277 }
10278
10279    /* Commit the new RSP value unless an access handler made trouble. */
10280 if (rc == VINF_SUCCESS)
10281 pCtx->rsp = uNewRsp;
10282
10283 return rc;
10284}
10285
10286
10287/**
10288 * Pushes a dword segment register value onto the stack.
10289 *
10290 * @returns Strict VBox status code.
10291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10292 * @param u32Value The value to push.
10293 */
10294IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10295{
10296    /* Decrement the stack pointer. */
10297 uint64_t uNewRsp;
10298 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10299 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10300
10301 VBOXSTRICTRC rc;
10302 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10303 {
10304 /* The recompiler writes a full dword. */
10305 uint32_t *pu32Dst;
10306 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10307 if (rc == VINF_SUCCESS)
10308 {
10309 *pu32Dst = u32Value;
10310 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10311 }
10312 }
10313 else
10314 {
10315        /* The Intel docs talk about zero extending the selector register
10316           value. My actual Intel CPU here might be zero extending the value
10317           but it still only writes the lower word... */
10318 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10319         * happens when crossing an electric page boundary: is the high word checked
10320 * for write accessibility or not? Probably it is. What about segment limits?
10321 * It appears this behavior is also shared with trap error codes.
10322 *
10323 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10324 * ancient hardware when it actually did change. */
10325 uint16_t *pu16Dst;
10326 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10327 if (rc == VINF_SUCCESS)
10328 {
10329 *pu16Dst = (uint16_t)u32Value;
10330 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10331 }
10332 }
10333
10334    /* Commit the new RSP value unless an access handler made trouble. */
10335 if (rc == VINF_SUCCESS)
10336 pCtx->rsp = uNewRsp;
10337
10338 return rc;
10339}
10340
10341
10342/**
10343 * Pushes a qword onto the stack.
10344 *
10345 * @returns Strict VBox status code.
10346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10347 * @param u64Value The value to push.
10348 */
10349IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10350{
10351    /* Decrement the stack pointer. */
10352 uint64_t uNewRsp;
10353 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10354 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10355
10356    /* Write the qword the lazy way. */
10357 uint64_t *pu64Dst;
10358 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10359 if (rc == VINF_SUCCESS)
10360 {
10361 *pu64Dst = u64Value;
10362 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10363 }
10364
10365    /* Commit the new RSP value unless an access handler made trouble. */
10366 if (rc == VINF_SUCCESS)
10367 pCtx->rsp = uNewRsp;
10368
10369 return rc;
10370}
10371
10372
10373/**
10374 * Pops a word from the stack.
10375 *
10376 * @returns Strict VBox status code.
10377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10378 * @param pu16Value Where to store the popped value.
10379 */
10380IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10381{
10382 /* Increment the stack pointer. */
10383 uint64_t uNewRsp;
10384 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10385 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10386
10387    /* Read the word the lazy way. */
10388 uint16_t const *pu16Src;
10389 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10390 if (rc == VINF_SUCCESS)
10391 {
10392 *pu16Value = *pu16Src;
10393 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10394
10395 /* Commit the new RSP value. */
10396 if (rc == VINF_SUCCESS)
10397 pCtx->rsp = uNewRsp;
10398 }
10399
10400 return rc;
10401}
10402
10403
10404/**
10405 * Pops a dword from the stack.
10406 *
10407 * @returns Strict VBox status code.
10408 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10409 * @param pu32Value Where to store the popped value.
10410 */
10411IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10412{
10413 /* Increment the stack pointer. */
10414 uint64_t uNewRsp;
10415 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10416 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10417
10418    /* Read the dword the lazy way. */
10419 uint32_t const *pu32Src;
10420 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10421 if (rc == VINF_SUCCESS)
10422 {
10423 *pu32Value = *pu32Src;
10424 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10425
10426 /* Commit the new RSP value. */
10427 if (rc == VINF_SUCCESS)
10428 pCtx->rsp = uNewRsp;
10429 }
10430
10431 return rc;
10432}
10433
10434
10435/**
10436 * Pops a qword from the stack.
10437 *
10438 * @returns Strict VBox status code.
10439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10440 * @param pu64Value Where to store the popped value.
10441 */
10442IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10443{
10444 /* Increment the stack pointer. */
10445 uint64_t uNewRsp;
10446 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10447 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10448
10449    /* Read the qword the lazy way. */
10450 uint64_t const *pu64Src;
10451 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10452 if (rc == VINF_SUCCESS)
10453 {
10454 *pu64Value = *pu64Src;
10455 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10456
10457 /* Commit the new RSP value. */
10458 if (rc == VINF_SUCCESS)
10459 pCtx->rsp = uNewRsp;
10460 }
10461
10462 return rc;
10463}
10464
10465
10466/**
10467 * Pushes a word onto the stack, using a temporary stack pointer.
10468 *
10469 * @returns Strict VBox status code.
10470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10471 * @param u16Value The value to push.
10472 * @param pTmpRsp Pointer to the temporary stack pointer.
10473 */
10474IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10475{
10476    /* Decrement the stack pointer. */
10477 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10478 RTUINT64U NewRsp = *pTmpRsp;
10479 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10480
10481 /* Write the word the lazy way. */
10482 uint16_t *pu16Dst;
10483 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10484 if (rc == VINF_SUCCESS)
10485 {
10486 *pu16Dst = u16Value;
10487 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10488 }
10489
10490    /* Commit the new RSP value unless an access handler made trouble. */
10491 if (rc == VINF_SUCCESS)
10492 *pTmpRsp = NewRsp;
10493
10494 return rc;
10495}
10496
10497
10498/**
10499 * Pushes a dword onto the stack, using a temporary stack pointer.
10500 *
10501 * @returns Strict VBox status code.
10502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10503 * @param u32Value The value to push.
10504 * @param pTmpRsp Pointer to the temporary stack pointer.
10505 */
10506IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10507{
10508    /* Decrement the stack pointer. */
10509 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10510 RTUINT64U NewRsp = *pTmpRsp;
10511 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10512
10513    /* Write the dword the lazy way. */
10514 uint32_t *pu32Dst;
10515 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10516 if (rc == VINF_SUCCESS)
10517 {
10518 *pu32Dst = u32Value;
10519 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10520 }
10521
10522    /* Commit the new RSP value unless an access handler made trouble. */
10523 if (rc == VINF_SUCCESS)
10524 *pTmpRsp = NewRsp;
10525
10526 return rc;
10527}
10528
10529
10530/**
10531 * Pushes a qword onto the stack, using a temporary stack pointer.
10532 *
10533 * @returns Strict VBox status code.
10534 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10535 * @param u64Value The value to push.
10536 * @param pTmpRsp Pointer to the temporary stack pointer.
10537 */
10538IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10539{
10540    /* Decrement the stack pointer. */
10541 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10542 RTUINT64U NewRsp = *pTmpRsp;
10543 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10544
10545    /* Write the qword the lazy way. */
10546 uint64_t *pu64Dst;
10547 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10548 if (rc == VINF_SUCCESS)
10549 {
10550 *pu64Dst = u64Value;
10551 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10552 }
10553
10554    /* Commit the new RSP value unless an access handler made trouble. */
10555 if (rc == VINF_SUCCESS)
10556 *pTmpRsp = NewRsp;
10557
10558 return rc;
10559}
10560
10561
10562/**
10563 * Pops a word from the stack, using a temporary stack pointer.
10564 *
10565 * @returns Strict VBox status code.
10566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10567 * @param pu16Value Where to store the popped value.
10568 * @param pTmpRsp Pointer to the temporary stack pointer.
10569 */
10570IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10571{
10572 /* Increment the stack pointer. */
10573 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10574 RTUINT64U NewRsp = *pTmpRsp;
10575 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10576
10577    /* Read the word the lazy way. */
10578 uint16_t const *pu16Src;
10579 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10580 if (rc == VINF_SUCCESS)
10581 {
10582 *pu16Value = *pu16Src;
10583 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10584
10585 /* Commit the new RSP value. */
10586 if (rc == VINF_SUCCESS)
10587 *pTmpRsp = NewRsp;
10588 }
10589
10590 return rc;
10591}
10592
10593
10594/**
10595 * Pops a dword from the stack, using a temporary stack pointer.
10596 *
10597 * @returns Strict VBox status code.
10598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10599 * @param pu32Value Where to store the popped value.
10600 * @param pTmpRsp Pointer to the temporary stack pointer.
10601 */
10602IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10603{
10604 /* Increment the stack pointer. */
10605 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10606 RTUINT64U NewRsp = *pTmpRsp;
10607 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10608
10609    /* Read the dword the lazy way. */
10610 uint32_t const *pu32Src;
10611 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10612 if (rc == VINF_SUCCESS)
10613 {
10614 *pu32Value = *pu32Src;
10615 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10616
10617 /* Commit the new RSP value. */
10618 if (rc == VINF_SUCCESS)
10619 *pTmpRsp = NewRsp;
10620 }
10621
10622 return rc;
10623}
10624
10625
10626/**
10627 * Pops a qword from the stack, using a temporary stack pointer.
10628 *
10629 * @returns Strict VBox status code.
10630 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10631 * @param pu64Value Where to store the popped value.
10632 * @param pTmpRsp Pointer to the temporary stack pointer.
10633 */
10634IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10635{
10636 /* Increment the stack pointer. */
10637 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10638 RTUINT64U NewRsp = *pTmpRsp;
10639 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10640
10641    /* Read the qword the lazy way. */
10642 uint64_t const *pu64Src;
10643 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10644 if (rcStrict == VINF_SUCCESS)
10645 {
10646 *pu64Value = *pu64Src;
10647 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10648
10649 /* Commit the new RSP value. */
10650 if (rcStrict == VINF_SUCCESS)
10651 *pTmpRsp = NewRsp;
10652 }
10653
10654 return rcStrict;
10655}
10656
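/*
 * Sketch (illustration only) of why the *Ex push/pop variants take a
 * temporary stack pointer: a multi-value operation works against a local
 * RTUINT64U copy and commits CPUMCTX::rsp only once every access has
 * succeeded.  The handler name and the two-dword layout are made up.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExamplePopTwoDwords(PVMCPU pVCpu, uint32_t *puFirst, uint32_t *puSecond)
{
    PCPUMCTX  pCtx = IEM_GET_CTX(pVCpu);
    RTUINT64U TmpRsp;
    TmpRsp.u = pCtx->rsp;

    VBOXSTRICTRC rcStrict = iemMemStackPopU32Ex(pVCpu, puFirst, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPopU32Ex(pVCpu, puSecond, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pCtx->rsp = TmpRsp.u;   /* commit only after both pops succeeded */
    return rcStrict;
}
#endif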
10657
10658/**
10659 * Begin a special stack push (used by interrupt, exceptions and such).
10660 *
10661 * This will raise \#SS or \#PF if appropriate.
10662 *
10663 * @returns Strict VBox status code.
10664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10665 * @param cbMem The number of bytes to push onto the stack.
10666 * @param ppvMem Where to return the pointer to the stack memory.
10667 * As with the other memory functions this could be
10668 * direct access or bounce buffered access, so
10669 * don't commit the register until the commit call
10670 * succeeds.
10671 * @param puNewRsp Where to return the new RSP value. This must be
10672 * passed unchanged to
10673 * iemMemStackPushCommitSpecial().
10674 */
10675IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10676{
10677 Assert(cbMem < UINT8_MAX);
10678 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10679 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10680 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10681}
10682
10683
10684/**
10685 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10686 *
10687 * This will update the rSP.
10688 *
10689 * @returns Strict VBox status code.
10690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10691 * @param pvMem The pointer returned by
10692 * iemMemStackPushBeginSpecial().
10693 * @param uNewRsp The new RSP value returned by
10694 * iemMemStackPushBeginSpecial().
10695 */
10696IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10697{
10698 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10699 if (rcStrict == VINF_SUCCESS)
10700 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10701 return rcStrict;
10702}
10703
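/*
 * Minimal sketch (illustration only) of the special stack push protocol used
 * for exceptions and interrupts: begin, fill the returned buffer, commit.
 * The handler name and the two-word frame layout are invented for the
 * example; error handling is reduced to passing the status code up.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExamplePushTwoWords(PVMCPU pVCpu, uint16_t uFirst, uint16_t uSecond)
{
    uint16_t *pau16Frame;
    uint64_t  uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 4, (void **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau16Frame[1] = uFirst;     /* higher address, pushed "first" */
    pau16Frame[0] = uSecond;    /* lower address, new top of stack */
    return iemMemStackPushCommitSpecial(pVCpu, pau16Frame, uNewRsp);
}
#endif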
10704
10705/**
10706 * Begin a special stack pop (used by iret, retf and such).
10707 *
10708 * This will raise \#SS or \#PF if appropriate.
10709 *
10710 * @returns Strict VBox status code.
10711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10712 * @param cbMem The number of bytes to pop from the stack.
10713 * @param ppvMem Where to return the pointer to the stack memory.
10714 * @param puNewRsp Where to return the new RSP value. This must be
10715 * assigned to CPUMCTX::rsp manually some time
10716 * after iemMemStackPopDoneSpecial() has been
10717 * called.
10718 */
10719IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10720{
10721 Assert(cbMem < UINT8_MAX);
10722 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10723 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10724 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10725}
10726
10727
10728/**
10729 * Continue a special stack pop (used by iret and retf).
10730 *
10731 * This will raise \#SS or \#PF if appropriate.
10732 *
10733 * @returns Strict VBox status code.
10734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10735 * @param cbMem The number of bytes to pop from the stack.
10736 * @param ppvMem Where to return the pointer to the stack memory.
10737 * @param puNewRsp Where to return the new RSP value. This must be
10738 * assigned to CPUMCTX::rsp manually some time
10739 * after iemMemStackPopDoneSpecial() has been
10740 * called.
10741 */
10742IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10743{
10744 Assert(cbMem < UINT8_MAX);
10745 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10746 RTUINT64U NewRsp;
10747 NewRsp.u = *puNewRsp;
10748 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10749 *puNewRsp = NewRsp.u;
10750 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10751}
10752
10753
10754/**
10755 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10756 * iemMemStackPopContinueSpecial).
10757 *
10758 * The caller will manually commit the rSP.
10759 *
10760 * @returns Strict VBox status code.
10761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10762 * @param pvMem The pointer returned by
10763 * iemMemStackPopBeginSpecial() or
10764 * iemMemStackPopContinueSpecial().
10765 */
10766IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10767{
10768 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10769}
10770
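/*
 * Companion sketch (illustration only) for the special stack pop protocol
 * used by iret/retf: begin, copy out the data, call done, and commit RSP
 * manually only when the whole operation is known to succeed.  The handler
 * name and the three-word frame are made up.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExamplePopThreeWords(PVMCPU pVCpu, uint16_t *pau16Out)
{
    uint16_t const *pau16Frame;
    uint64_t        uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, (void const **)&pau16Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau16Out[0] = pau16Frame[0];
    pau16Out[1] = pau16Frame[1];
    pau16Out[2] = pau16Frame[2];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau16Frame);
    if (rcStrict == VINF_SUCCESS)
        IEM_GET_CTX(pVCpu)->rsp = uNewRsp;  /* the caller commits RSP, as the doc comments above note */
    return rcStrict;
}
#endif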
10771
10772/**
10773 * Fetches a system table byte.
10774 *
10775 * @returns Strict VBox status code.
10776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10777 * @param pbDst Where to return the byte.
10778 * @param iSegReg The index of the segment register to use for
10779 * this access. The base and limits are checked.
10780 * @param GCPtrMem The address of the guest memory.
10781 */
10782IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10783{
10784 /* The lazy approach for now... */
10785 uint8_t const *pbSrc;
10786 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10787 if (rc == VINF_SUCCESS)
10788 {
10789 *pbDst = *pbSrc;
10790 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10791 }
10792 return rc;
10793}
10794
10795
10796/**
10797 * Fetches a system table word.
10798 *
10799 * @returns Strict VBox status code.
10800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10801 * @param pu16Dst Where to return the word.
10802 * @param iSegReg The index of the segment register to use for
10803 * this access. The base and limits are checked.
10804 * @param GCPtrMem The address of the guest memory.
10805 */
10806IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10807{
10808 /* The lazy approach for now... */
10809 uint16_t const *pu16Src;
10810 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10811 if (rc == VINF_SUCCESS)
10812 {
10813 *pu16Dst = *pu16Src;
10814 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10815 }
10816 return rc;
10817}
10818
10819
10820/**
10821 * Fetches a system table dword.
10822 *
10823 * @returns Strict VBox status code.
10824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10825 * @param pu32Dst Where to return the dword.
10826 * @param iSegReg The index of the segment register to use for
10827 * this access. The base and limits are checked.
10828 * @param GCPtrMem The address of the guest memory.
10829 */
10830IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10831{
10832 /* The lazy approach for now... */
10833 uint32_t const *pu32Src;
10834 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10835 if (rc == VINF_SUCCESS)
10836 {
10837 *pu32Dst = *pu32Src;
10838 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10839 }
10840 return rc;
10841}
10842
10843
10844/**
10845 * Fetches a system table qword.
10846 *
10847 * @returns Strict VBox status code.
10848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10849 * @param pu64Dst Where to return the qword.
10850 * @param iSegReg The index of the segment register to use for
10851 * this access. The base and limits are checked.
10852 * @param GCPtrMem The address of the guest memory.
10853 */
10854IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10855{
10856 /* The lazy approach for now... */
10857 uint64_t const *pu64Src;
10858 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10859 if (rc == VINF_SUCCESS)
10860 {
10861 *pu64Dst = *pu64Src;
10862 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10863 }
10864 return rc;
10865}
10866
10867
10868/**
10869 * Fetches a descriptor table entry with caller specified error code.
10870 *
10871 * @returns Strict VBox status code.
10872 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10873 * @param pDesc Where to return the descriptor table entry.
10874 * @param uSel The selector whose table entry to fetch.
10875 * @param uXcpt The exception to raise on table lookup error.
10876 * @param uErrorCode The error code associated with the exception.
10877 */
10878IEM_STATIC VBOXSTRICTRC
10879iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10880{
10881 AssertPtr(pDesc);
10882 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10883
10884 /** @todo did the 286 require all 8 bytes to be accessible? */
10885 /*
10886 * Get the selector table base and check bounds.
10887 */
10888 RTGCPTR GCPtrBase;
10889 if (uSel & X86_SEL_LDT)
10890 {
10891 if ( !pCtx->ldtr.Attr.n.u1Present
10892 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10893 {
10894 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10895 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10896 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10897 uErrorCode, 0);
10898 }
10899
10900 Assert(pCtx->ldtr.Attr.n.u1Present);
10901 GCPtrBase = pCtx->ldtr.u64Base;
10902 }
10903 else
10904 {
10905 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10906 {
10907 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10908 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10909 uErrorCode, 0);
10910 }
10911 GCPtrBase = pCtx->gdtr.pGdt;
10912 }
10913
10914 /*
10915 * Read the legacy descriptor and maybe the long mode extensions if
10916 * required.
10917 */
10918 VBOXSTRICTRC rcStrict;
10919 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10920 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10921 else
10922 {
10923 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10924 if (rcStrict == VINF_SUCCESS)
10925 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10926 if (rcStrict == VINF_SUCCESS)
10927 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10928 if (rcStrict == VINF_SUCCESS)
10929 pDesc->Legacy.au16[3] = 0;
10930 else
10931 return rcStrict;
10932 }
10933
10934 if (rcStrict == VINF_SUCCESS)
10935 {
10936 if ( !IEM_IS_LONG_MODE(pVCpu)
10937 || pDesc->Legacy.Gen.u1DescType)
10938 pDesc->Long.au64[1] = 0;
10939 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10940 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10941 else
10942 {
10943 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10944 /** @todo is this the right exception? */
10945 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10946 }
10947 }
10948 return rcStrict;
10949}
10950
10951
10952/**
10953 * Fetches a descriptor table entry.
10954 *
10955 * @returns Strict VBox status code.
10956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10957 * @param pDesc Where to return the descriptor table entry.
10958 * @param uSel The selector which table entry to fetch.
10959 * @param uXcpt The exception to raise on table lookup error.
10960 */
10961IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10962{
10963 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10964}
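
/*
 * Illustrative sketch only (not part of the build): roughly how a caller uses
 * the fetcher above when loading a segment register.  The selector variable
 * and the follow-up checks are assumptions chosen for the example, not copied
 * from a specific caller.
 *
 * @code
 *      IEMSELDESC Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uNewSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (!Desc.Legacy.Gen.u1Present)     // not-present selectors raise #NP instead
 *          return iemRaiseSelectorNotPresentBySelector(pVCpu, uNewSel);
 * @endcode
 */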
10965
10966
10967/**
10968 * Fakes a long mode stack selector for SS = 0.
10969 *
10970 * @param pDescSs Where to return the fake stack descriptor.
10971 * @param uDpl The DPL we want.
10972 */
10973IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10974{
10975 pDescSs->Long.au64[0] = 0;
10976 pDescSs->Long.au64[1] = 0;
10977 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10978 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10979 pDescSs->Long.Gen.u2Dpl = uDpl;
10980 pDescSs->Long.Gen.u1Present = 1;
10981 pDescSs->Long.Gen.u1Long = 1;
10982}
10983
10984
10985/**
10986 * Marks the selector descriptor as accessed (only non-system descriptors).
10987 *
10988 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10989 * will therefore skip the limit checks.
10990 *
10991 * @returns Strict VBox status code.
10992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10993 * @param uSel The selector.
10994 */
10995IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10996{
10997 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10998
10999 /*
11000 * Get the selector table base and calculate the entry address.
11001 */
11002 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11003 ? pCtx->ldtr.u64Base
11004 : pCtx->gdtr.pGdt;
11005 GCPtr += uSel & X86_SEL_MASK;
11006
11007 /*
11008 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11009 * ugly stuff to avoid this. This will make sure it's an atomic access
11010 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11011 */
11012 VBOXSTRICTRC rcStrict;
11013 uint32_t volatile *pu32;
11014 if ((GCPtr & 3) == 0)
11015 {
11016        /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11017 GCPtr += 2 + 2;
11018 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11019 if (rcStrict != VINF_SUCCESS)
11020 return rcStrict;
11021        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11022 }
11023 else
11024 {
11025 /* The misaligned GDT/LDT case, map the whole thing. */
11026 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11027 if (rcStrict != VINF_SUCCESS)
11028 return rcStrict;
11029 switch ((uintptr_t)pu32 & 3)
11030 {
11031 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11032 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11033 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11034 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11035 }
11036 }
11037
11038 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11039}
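
/*
 * A minimal sanity sketch of the bit arithmetic used above, assuming the
 * standard descriptor layout: the accessed flag is bit 40 of the 8-byte
 * descriptor, i.e. bit 0 of the type field in byte 5, which is bit 8 of the
 * upper dword mapped by the aligned path (hence ASMAtomicBitSet(pu32, 8)).
 *
 * @code
 *      X86DESC Desc;
 *      RT_ZERO(Desc);
 *      ASMBitSet(&Desc, 40);                             // set descriptor bit 40 directly
 *      Assert(Desc.Gen.u4Type & X86_SEL_TYPE_ACCESSED);  // ... which is the accessed type bit
 *      Assert(Desc.au32[1] & RT_BIT_32(8));              // ... and bit 8 of the upper dword
 * @endcode
 */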
11040
11041/** @} */
11042
11043
11044/*
11045 * Include the C/C++ implementation of instructions.
11046 */
11047#include "IEMAllCImpl.cpp.h"
11048
11049
11050
11051/** @name "Microcode" macros.
11052 *
11053 * The idea is that we should be able to use the same code both to interpret
11054 * instructions and to recompile them. Thus this obfuscation.
11055 *
11056 * @{
11057 */
11058#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11059#define IEM_MC_END() }
11060#define IEM_MC_PAUSE() do {} while (0)
11061#define IEM_MC_CONTINUE() do {} while (0)
11062
11063/** Internal macro. */
11064#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11065 do \
11066 { \
11067 VBOXSTRICTRC rcStrict2 = a_Expr; \
11068 if (rcStrict2 != VINF_SUCCESS) \
11069 return rcStrict2; \
11070 } while (0)
11071
11072
11073#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11074#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11075#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11076#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11077#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11078#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11079#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11080#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11081#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11082 do { \
11083 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11084 return iemRaiseDeviceNotAvailable(pVCpu); \
11085 } while (0)
11086#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11087 do { \
11088 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11089 return iemRaiseDeviceNotAvailable(pVCpu); \
11090 } while (0)
11091#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11092 do { \
11093 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11094 return iemRaiseMathFault(pVCpu); \
11095 } while (0)
11096#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11097 do { \
11098 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11099 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11100 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11101 return iemRaiseUndefinedOpcode(pVCpu); \
11102 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11103 return iemRaiseDeviceNotAvailable(pVCpu); \
11104 } while (0)
11105#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11106 do { \
11107 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11108 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11109 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11110 return iemRaiseUndefinedOpcode(pVCpu); \
11111 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11112 return iemRaiseDeviceNotAvailable(pVCpu); \
11113 } while (0)
11114#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11115 do { \
11116 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11117 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11118 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11119 return iemRaiseUndefinedOpcode(pVCpu); \
11120 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11121 return iemRaiseDeviceNotAvailable(pVCpu); \
11122 } while (0)
11123#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11124 do { \
11125 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11126 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11127 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11128 return iemRaiseUndefinedOpcode(pVCpu); \
11129 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11130 return iemRaiseDeviceNotAvailable(pVCpu); \
11131 } while (0)
11132#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11133 do { \
11134 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11135 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11136 return iemRaiseUndefinedOpcode(pVCpu); \
11137 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11138 return iemRaiseDeviceNotAvailable(pVCpu); \
11139 } while (0)
11140#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11141 do { \
11142 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
11143 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11144 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11145 return iemRaiseUndefinedOpcode(pVCpu); \
11146 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11147 return iemRaiseDeviceNotAvailable(pVCpu); \
11148 } while (0)
11149#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11150 do { \
11151 if (pVCpu->iem.s.uCpl != 0) \
11152 return iemRaiseGeneralProtectionFault0(pVCpu); \
11153 } while (0)
11154#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11155 do { \
11156 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11157 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11158 } while (0)
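
/*
 * Note on the unaligned check above: a_cbAlign is expected to be a power of
 * two, so masking with (a_cbAlign - 1) extracts the offset within an aligned
 * block and any non-zero result means the effective address is misaligned.
 * A couple of illustrative values (16-byte alignment as used for SSE
 * operands):
 *
 * @code
 *      AssertCompile((UINT32_C(0x1000) & (16 - 1)) == 0);  // aligned, no fault
 *      AssertCompile((UINT32_C(0x1008) & (16 - 1)) != 0);  // misaligned -> #GP(0)
 * @endcode
 */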
11159
11160
11161#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11162#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11163#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11164#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11165#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11166#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11167#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11168 uint32_t a_Name; \
11169 uint32_t *a_pName = &a_Name
11170#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11171 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
11172
11173#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11174#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11175
11176#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11177#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11178#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11179#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11180#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11181#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11182#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11183#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11184#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11185#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11186#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11187#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11188#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11189#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11190#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11191#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11192#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11193#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11194#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11195#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11196#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11197#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11198#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11199#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11200#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11201#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11202#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11203#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11204#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11205/** @note Not for IOPL or IF testing or modification. */
11206#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11207#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11208#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11209#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11210
11211#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11212#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11213#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11214#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11215#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11216#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11217#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11218#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11219#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11220#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11221#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11222 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11223
11224#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11225#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11226/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11227 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11228#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11229#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11230/** @note Not for IOPL or IF testing or modification. */
11231#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11232
11233#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11234#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11235#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11236 do { \
11237 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11238 *pu32Reg += (a_u32Value); \
11239        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11240 } while (0)
11241#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
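
/*
 * The pu32Reg[1] = 0 statements above (and in the 32-bit SUB/AND/OR variants
 * below) implement the AMD64 rule that writing a 32-bit general purpose
 * register zero-extends the result into the whole 64-bit register, whereas
 * 8-bit and 16-bit writes leave the upper bits untouched.  A rough sketch of
 * the same idea, assuming a little endian host:
 *
 * @code
 *      uint64_t uRax = UINT64_C(0xfeedface00000005);
 *      ((uint32_t *)&uRax)[0] += 1;    // what "add eax, 1" does to the low half...
 *      ((uint32_t *)&uRax)[1] = 0;     // ...plus the implicit zero extension
 *      Assert(uRax == 6);
 * @endcode
 */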
11242
11243#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11244#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11245#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11246 do { \
11247 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11248 *pu32Reg -= (a_u32Value); \
11249        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11250 } while (0)
11251#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11252#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11253
11254#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11255#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11256#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11257#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11258#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11259#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11260#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11261
11262#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11263#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11264#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11265#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11266
11267#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11268#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11269#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11270
11271#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11272#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11273#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11274
11275#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11276#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11277#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11278
11279#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11280#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11281#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11282
11283#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11284
11285#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11286
11287#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11288#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11289#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11290 do { \
11291 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11292 *pu32Reg &= (a_u32Value); \
11293        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11294 } while (0)
11295#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11296
11297#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11298#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11299#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11300 do { \
11301 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11302 *pu32Reg |= (a_u32Value); \
11303        pu32Reg[1] = 0; /* implicitly clear the high 32 bits. */ \
11304 } while (0)
11305#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11306
11307
11308/** @note Not for IOPL or IF modification. */
11309#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11310/** @note Not for IOPL or IF modification. */
11311#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11312/** @note Not for IOPL or IF modification. */
11313#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11314
11315#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11316
11317/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11318#define IEM_MC_FPU_TO_MMX_MODE() do { \
11319 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11320 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11321 } while (0)
11322
11323#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11324 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11325#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11326 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11327#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11328 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11329 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11330 } while (0)
11331#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11332 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11333 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11334 } while (0)
11335#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11336 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11337#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11338 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11339#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11340 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11341
11342#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11343 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11344 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11345 } while (0)
11346#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11347 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11348#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11349 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11350#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11351 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11352#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11353 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11354 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11355 } while (0)
11356#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11357 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11358#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11359 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11360 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11361 } while (0)
11362#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11363 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11364#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11365 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11366 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11367 } while (0)
11368#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11369 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11370#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11371 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11372#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11373 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11374#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11375 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11376#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11377 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11378 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11379 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11380 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11381 } while (0)
11382
11383#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11384#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11385 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11386 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11387 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11388 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11389 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11390 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11391 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11392 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11393 } while (0)
11394#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11395 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11396 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11397 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11398 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11399 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11400 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11401 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11402 } while (0)
11403#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11404 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11405 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11406 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11407 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11408 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11409 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11410 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11411 } while (0)
11412#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11413 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11414 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11415 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11416 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11417 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11418 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11419 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11420 } while (0)
11421#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11422 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11423 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11424 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11425 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11426 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11427 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11428 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11429 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11430 } while (0)
11431#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11432 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11433 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11434 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11435 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11436 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11437 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11438 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11439 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11440 } while (0)
11441#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11442 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11443 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11444 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11445 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11446 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11447 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11448 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11449 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11450 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11451 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11452 } while (0)
11453#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11454 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11455 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11456 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11457 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11458 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11459 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11460 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11461 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11462 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, a_iYRegDst); \
11463 } while (0)
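
/*
 * Quick illustration of the merge semantics, using register-form vmovsd
 * (VEX.F2.0F 10, mod=3) as the example: the destination gets the low qword
 * from the U register, bits 64..127 from the H register, and everything above
 * bit 127 is zeroed up to VLMAX.  The register indices are fixed for the
 * example; the real decoder derives them from the VEX prefix and ModRM byte.
 *
 * @code
 *      // vmovsd xmm0, xmm1, xmm2: dst=0, src2(U)=2, src1(H)=1
 *      IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(0, 2, 1);
 * @endcode
 */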
11464
11465#ifndef IEM_WITH_SETJMP
11466# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11467 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11468# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11469 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11470# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11471 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11472#else
11473# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11474 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11475# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11476 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11477# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11478 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11479#endif
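
/*
 * Note on the two build flavours, with IEM_MC_FETCH_MEM_U8 as the example:
 * without IEM_WITH_SETJMP every memory access returns a strict status code
 * and the macro bails out of the IEM_MC block on failure, whereas with
 * IEM_WITH_SETJMP a failing access longjmps out of the block and the macro
 * reduces to a plain assignment.  The expansions below are illustrative only
 * (u8Dst and GCPtrMem are stand-in names).
 *
 * @code
 *      // Status code flavour:
 *      IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Dst, X86_SREG_DS, GCPtrMem));
 *      // Setjmp flavour:
 *      u8Dst = iemMemFetchDataU8Jmp(pVCpu, X86_SREG_DS, GCPtrMem);
 * @endcode
 */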
11480
11481#ifndef IEM_WITH_SETJMP
11482# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11483 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11484# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11485 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11486# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11487 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11488#else
11489# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11490 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11491# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11492 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11493# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11494 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11495#endif
11496
11497#ifndef IEM_WITH_SETJMP
11498# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11499 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11500# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11501 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11502# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11503 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11504#else
11505# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11506 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11507# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11508 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11509# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11510 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11511#endif
11512
11513#ifdef SOME_UNUSED_FUNCTION
11514# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11515 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11516#endif
11517
11518#ifndef IEM_WITH_SETJMP
11519# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11520 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11521# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11522 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11523# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11524 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11525# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11526 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11527#else
11528# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11529 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11530# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11531 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11532# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11533 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11534# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11535 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11536#endif
11537
11538#ifndef IEM_WITH_SETJMP
11539# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11540 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11541# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11542 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11543# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11544 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11545#else
11546# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11547 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11548# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11549 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11550# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11551 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11552#endif
11553
11554#ifndef IEM_WITH_SETJMP
11555# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11556 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11557# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11558 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11559#else
11560# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11561 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11562# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11563 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11564#endif
11565
11566#ifndef IEM_WITH_SETJMP
11567# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11568 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11569# define IEM_MC_FETCH_MEM_U256_ALIGN_SSE(a_u256Dst, a_iSeg, a_GCPtrMem) \
11570 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11571#else
11572# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11573 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11574# define IEM_MC_FETCH_MEM_U256_ALIGN_SSE(a_u256Dst, a_iSeg, a_GCPtrMem) \
11575 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11576#endif
11577
11578
11579
11580#ifndef IEM_WITH_SETJMP
11581# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11582 do { \
11583 uint8_t u8Tmp; \
11584 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11585 (a_u16Dst) = u8Tmp; \
11586 } while (0)
11587# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11588 do { \
11589 uint8_t u8Tmp; \
11590 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11591 (a_u32Dst) = u8Tmp; \
11592 } while (0)
11593# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11594 do { \
11595 uint8_t u8Tmp; \
11596 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11597 (a_u64Dst) = u8Tmp; \
11598 } while (0)
11599# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11600 do { \
11601 uint16_t u16Tmp; \
11602 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11603 (a_u32Dst) = u16Tmp; \
11604 } while (0)
11605# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11606 do { \
11607 uint16_t u16Tmp; \
11608 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11609 (a_u64Dst) = u16Tmp; \
11610 } while (0)
11611# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11612 do { \
11613 uint32_t u32Tmp; \
11614 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11615 (a_u64Dst) = u32Tmp; \
11616 } while (0)
11617#else /* IEM_WITH_SETJMP */
11618# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11619 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11620# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11621 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11622# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11623 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11624# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11625 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11626# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11627 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11628# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11629 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11630#endif /* IEM_WITH_SETJMP */
11631
11632#ifndef IEM_WITH_SETJMP
11633# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11634 do { \
11635 uint8_t u8Tmp; \
11636 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11637 (a_u16Dst) = (int8_t)u8Tmp; \
11638 } while (0)
11639# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11640 do { \
11641 uint8_t u8Tmp; \
11642 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11643 (a_u32Dst) = (int8_t)u8Tmp; \
11644 } while (0)
11645# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11646 do { \
11647 uint8_t u8Tmp; \
11648 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11649 (a_u64Dst) = (int8_t)u8Tmp; \
11650 } while (0)
11651# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11652 do { \
11653 uint16_t u16Tmp; \
11654 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11655 (a_u32Dst) = (int16_t)u16Tmp; \
11656 } while (0)
11657# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11658 do { \
11659 uint16_t u16Tmp; \
11660 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11661 (a_u64Dst) = (int16_t)u16Tmp; \
11662 } while (0)
11663# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11664 do { \
11665 uint32_t u32Tmp; \
11666 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11667 (a_u64Dst) = (int32_t)u32Tmp; \
11668 } while (0)
11669#else /* IEM_WITH_SETJMP */
11670# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11671 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11672# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11673 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11674# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11675 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11676# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11677 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11678# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11679 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11680# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11681 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11682#endif /* IEM_WITH_SETJMP */
11683
11684#ifndef IEM_WITH_SETJMP
11685# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11686 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11687# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11688 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11689# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11690 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11691# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11692 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11693#else
11694# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11695 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11696# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11697 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11698# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11699 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11700# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11701 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11702#endif
11703
11704#ifndef IEM_WITH_SETJMP
11705# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11706 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11707# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11708 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11709# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11710 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11711# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11712 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11713#else
11714# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11715 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11716# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11717 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11718# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11719 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11720# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11721 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11722#endif
11723
11724#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11725#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11726#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11727#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11728#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11729#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11730#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11731 do { \
11732 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11733 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11734 } while (0)
11735
11736#ifndef IEM_WITH_SETJMP
11737# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11738 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11739# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11740 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11741#else
11742# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11743 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11744# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11745 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11746#endif
11747
11748
11749#define IEM_MC_PUSH_U16(a_u16Value) \
11750 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11751#define IEM_MC_PUSH_U32(a_u32Value) \
11752 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11753#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11754 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11755#define IEM_MC_PUSH_U64(a_u64Value) \
11756 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11757
11758#define IEM_MC_POP_U16(a_pu16Value) \
11759 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11760#define IEM_MC_POP_U32(a_pu32Value) \
11761 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11762#define IEM_MC_POP_U64(a_pu64Value) \
11763 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11764
11765/** Maps guest memory for direct or bounce buffered access.
11766 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11767 * @remarks May return.
11768 */
11769#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11770 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11771
11772/** Maps guest memory for direct or bounce buffered access.
11773 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11774 * @remarks May return.
11775 */
11776#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11777 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11778
11779/** Commits the memory and unmaps the guest memory.
11780 * @remarks May return.
11781 */
11782#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11783 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11784
11785/** Commits the memory and unmaps the guest memory unless the FPU status word
11786 * (@a a_u16FSW) and FPU control word indicate a pending unmasked exception
11787 * that would cause FLD not to store.
11788 *
11789 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11790 * store, while \#P will not.
11791 *
11792 * @remarks May in theory return - for now.
11793 */
11794#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11795 do { \
11796 if ( !(a_u16FSW & X86_FSW_ES) \
11797 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11798 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11799 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11800 } while (0)
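
/*
 * The expression above relies on the FSW exception flags and the FCW mask
 * bits occupying the same bit positions, so pending exceptions can be ANDed
 * directly against the inverted mask.  A worked example: FSW = ES|UE with an
 * unmasked underflow (FCW.UM clear) leaves a non-zero unmasked-exception set
 * and the store is skipped, while with FCW.UM set the store goes ahead.
 *
 * @code
 *      AssertCompile(X86_FSW_IE == X86_FCW_IM);
 *      AssertCompile(X86_FSW_OE == X86_FCW_OM);
 *      AssertCompile(X86_FSW_UE == X86_FCW_UM);
 * @endcode
 */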
11801
11802/** Calculate efficient address from R/M. */
11803#ifndef IEM_WITH_SETJMP
11804# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11805 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11806#else
11807# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11808 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11809#endif
11810
11811#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11812#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11813#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11814#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11815#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11816#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11817#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
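
/*
 * Rough sketch of how the register form of a two operand ALU instruction
 * strings these macros together (the real bodies live in the
 * IEMAllInstructions*.cpp.h templates and take the register indices from the
 * ModRM byte; iemAImpl_add_u32 and the fixed eax/ecx operands are assumptions
 * for the example):
 *
 * @code
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_ARG(uint32_t *, pu32Dst, 0);
 *      IEM_MC_ARG(uint32_t,   u32Src,  1);
 *      IEM_MC_ARG(uint32_t *, pEFlags, 2);
 *      IEM_MC_FETCH_GREG_U32(u32Src, X86_GREG_xCX);
 *      IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(iemAImpl_add_u32, pu32Dst, u32Src, pEFlags);
 *      IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 * @endcode
 */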
11818
11819/**
11820 * Defers the rest of the instruction emulation to a C implementation routine
11821 * and returns, only taking the standard parameters.
11822 *
11823 * @param a_pfnCImpl The pointer to the C routine.
11824 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11825 */
11826#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11827
11828/**
11829 * Defers the rest of instruction emulation to a C implementation routine and
11830 * returns, taking one argument in addition to the standard ones.
11831 *
11832 * @param a_pfnCImpl The pointer to the C routine.
11833 * @param a0 The argument.
11834 */
11835#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11836
11837/**
11838 * Defers the rest of the instruction emulation to a C implementation routine
11839 * and returns, taking two arguments in addition to the standard ones.
11840 *
11841 * @param a_pfnCImpl The pointer to the C routine.
11842 * @param a0 The first extra argument.
11843 * @param a1 The second extra argument.
11844 */
11845#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11846
11847/**
11848 * Defers the rest of the instruction emulation to a C implementation routine
11849 * and returns, taking three arguments in addition to the standard ones.
11850 *
11851 * @param a_pfnCImpl The pointer to the C routine.
11852 * @param a0 The first extra argument.
11853 * @param a1 The second extra argument.
11854 * @param a2 The third extra argument.
11855 */
11856#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11857
11858/**
11859 * Defers the rest of the instruction emulation to a C implementation routine
11860 * and returns, taking four arguments in addition to the standard ones.
11861 *
11862 * @param a_pfnCImpl The pointer to the C routine.
11863 * @param a0 The first extra argument.
11864 * @param a1 The second extra argument.
11865 * @param a2 The third extra argument.
11866 * @param a3 The fourth extra argument.
11867 */
11868#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11869
11870/**
11871 * Defers the rest of the instruction emulation to a C implementation routine
11872 * and returns, taking five arguments in addition to the standard ones.
11873 *
11874 * @param a_pfnCImpl The pointer to the C routine.
11875 * @param a0 The first extra argument.
11876 * @param a1 The second extra argument.
11877 * @param a2 The third extra argument.
11878 * @param a3 The fourth extra argument.
11879 * @param a4 The fifth extra argument.
11880 */
11881#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11882
11883/**
11884 * Defers the entire instruction emulation to a C implementation routine and
11885 * returns, only taking the standard parameters.
11886 *
11887 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11888 *
11889 * @param a_pfnCImpl The pointer to the C routine.
11890 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11891 */
11892#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11893
11894/**
11895 * Defers the entire instruction emulation to a C implementation routine and
11896 * returns, taking one argument in addition to the standard ones.
11897 *
11898 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11899 *
11900 * @param a_pfnCImpl The pointer to the C routine.
11901 * @param a0 The argument.
11902 */
11903#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11904
11905/**
11906 * Defers the entire instruction emulation to a C implementation routine and
11907 * returns, taking two arguments in addition to the standard ones.
11908 *
11909 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11910 *
11911 * @param a_pfnCImpl The pointer to the C routine.
11912 * @param a0 The first extra argument.
11913 * @param a1 The second extra argument.
11914 */
11915#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11916
11917/**
11918 * Defers the entire instruction emulation to a C implementation routine and
11919 * returns, taking three arguments in addition to the standard ones.
11920 *
11921 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11922 *
11923 * @param a_pfnCImpl The pointer to the C routine.
11924 * @param a0 The first extra argument.
11925 * @param a1 The second extra argument.
11926 * @param a2 The third extra argument.
11927 */
11928#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
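
/*
 * Illustrative only: decoder functions for instructions that are all side
 * effects (privilege checks, mode switches, etc.) typically hand the whole
 * job to a C worker from IEMAllCImpl.cpp.h, e.g. something along the lines
 * of:
 *
 * @code
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_cli);
 * @endcode
 *
 * whereas the IEM_MC_CALL_CIMPL_N macros above are used from inside an
 * IEM_MC block once the operands have been decoded.
 */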
11929
11930/**
11931 * Calls a FPU assembly implementation taking one visible argument.
11932 *
11933 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11934 * @param a0 The first extra argument.
11935 */
11936#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11937 do { \
11938 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11939 } while (0)
11940
11941/**
11942 * Calls a FPU assembly implementation taking two visible arguments.
11943 *
11944 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11945 * @param a0 The first extra argument.
11946 * @param a1 The second extra argument.
11947 */
11948#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11949 do { \
11950 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11951 } while (0)
11952
11953/**
11954 * Calls a FPU assembly implementation taking three visible arguments.
11955 *
11956 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11957 * @param a0 The first extra argument.
11958 * @param a1 The second extra argument.
11959 * @param a2 The third extra argument.
11960 */
11961#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11962 do { \
11963 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11964 } while (0)
11965
11966#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11967 do { \
11968 (a_FpuData).FSW = (a_FSW); \
11969 (a_FpuData).r80Result = *(a_pr80Value); \
11970 } while (0)
11971
11972/** Pushes FPU result onto the stack. */
11973#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11974 iemFpuPushResult(pVCpu, &a_FpuData)
11975/** Pushes FPU result onto the stack and sets the FPUDP. */
11976#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11977 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11978
11979/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
11980#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11981 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11982
11983/** Stores FPU result in a stack register. */
11984#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11985 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11986/** Stores FPU result in a stack register and pops the stack. */
11987#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11988 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11989/** Stores FPU result in a stack register and sets the FPUDP. */
11990#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11991 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11992/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11993 * stack. */
11994#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11995 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11996
11997/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11998#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11999 iemFpuUpdateOpcodeAndIp(pVCpu)
12000/** Free a stack register (for FFREE and FFREEP). */
12001#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12002 iemFpuStackFree(pVCpu, a_iStReg)
12003/** Increment the FPU stack pointer. */
12004#define IEM_MC_FPU_STACK_INC_TOP() \
12005 iemFpuStackIncTop(pVCpu)
12006/** Decrement the FPU stack pointer. */
12007#define IEM_MC_FPU_STACK_DEC_TOP() \
12008 iemFpuStackDecTop(pVCpu)
12009
12010/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12011#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12012 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12013/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12014#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12015 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12016/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12017#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12018 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12019/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12020#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12021 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12022/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12023 * stack. */
12024#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12025 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12026/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12027#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12028 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12029
12030/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12031#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12032 iemFpuStackUnderflow(pVCpu, a_iStDst)
12033/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12034 * stack. */
12035#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12036 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12037/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12038 * FPUDS. */
12039#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12040 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12041/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12042 * FPUDS. Pops stack. */
12043#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12044 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12045/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12046 * stack twice. */
12047#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12048 iemFpuStackUnderflowThenPopPop(pVCpu)
12049/** Raises a FPU stack underflow exception for an instruction pushing a result
12050 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12051#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12052 iemFpuStackPushUnderflow(pVCpu)
12053/** Raises a FPU stack underflow exception for an instruction pushing a result
12054 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12055#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12056 iemFpuStackPushUnderflowTwo(pVCpu)
12057
12058/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12059 * FPUIP, FPUCS and FOP. */
12060#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12061 iemFpuStackPushOverflow(pVCpu)
12062/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12063 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12064#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12065 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12066/** Prepares for using the FPU state.
12067 * Ensures that we can use the host FPU in the current context (RC+R0).
12068 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12069#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12070/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
12071#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12072/** Actualizes the guest FPU state so it can be accessed and modified. */
12073#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12074
12075/** Prepares for using the SSE state.
12076 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12077 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12078#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12079/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12080#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12081/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12082#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12083
12084/** Prepares for using the AVX state.
12085 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12086 * Ensures the guest AVX state in the CPUMCTX is up to date.
12087 * @note This will include the AVX512 state too when support for it is added
12088 *       due to the zero-extending behaviour of VEX-encoded instructions. */
12089#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12090/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12091#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12092/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12093#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
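/*
 * Ordering note (illustrative): the PREPARE variants are used ahead of calls
 * into the assembly workers (the MMX/SSE call macros below do this themselves),
 * while the ACTUALIZE variants are used when the MC block reads or modifies the
 * register state directly, e.g. (condensed, assumed shape):
 *
 * @code
 *  IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT();
 *  IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ();
 *  IEM_MC_FETCH_XREG_U64(uSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 * @endcode
 */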
12094
12095/**
12096 * Calls a MMX assembly implementation taking two visible arguments.
12097 *
12098 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12099 * @param a0 The first extra argument.
12100 * @param a1 The second extra argument.
12101 */
12102#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12103 do { \
12104 IEM_MC_PREPARE_FPU_USAGE(); \
12105 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12106 } while (0)
12107
12108/**
12109 * Calls a MMX assembly implementation taking three visible arguments.
12110 *
12111 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12112 * @param a0 The first extra argument.
12113 * @param a1 The second extra argument.
12114 * @param a2 The third extra argument.
12115 */
12116#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12117 do { \
12118 IEM_MC_PREPARE_FPU_USAGE(); \
12119 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12120 } while (0)
12121
12122
12123/**
12124 * Calls a SSE assembly implementation taking two visible arguments.
12125 *
12126 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12127 * @param a0 The first extra argument.
12128 * @param a1 The second extra argument.
12129 */
12130#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12131 do { \
12132 IEM_MC_PREPARE_SSE_USAGE(); \
12133 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12134 } while (0)
12135
12136/**
12137 * Calls a SSE assembly implementation taking three visible arguments.
12138 *
12139 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12140 * @param a0 The first extra argument.
12141 * @param a1 The second extra argument.
12142 * @param a2 The third extra argument.
12143 */
12144#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12145 do { \
12146 IEM_MC_PREPARE_SSE_USAGE(); \
12147 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12148 } while (0)
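/*
 * Note: the MMX/SSE call macros above invoke IEM_MC_PREPARE_FPU_USAGE /
 * IEM_MC_PREPARE_SSE_USAGE themselves before dispatching, unlike the x87
 * variants earlier in this group.  A condensed register-to-register SSE worker
 * (assumed shape; pImpl/pfnU128 are illustrative) would read roughly:
 *
 * @code
 *  IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *  IEM_MC_PREPARE_SSE_USAGE();
 *  IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK) | pVCpu->iem.s.uRexReg);
 *  IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *  IEM_MC_CALL_SSE_AIMPL_2(pImpl->pfnU128, pDst, pSrc);
 * @endcode
 */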
12149
12150/** @note Not for IOPL or IF testing. */
12151#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12152/** @note Not for IOPL or IF testing. */
12153#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12154/** @note Not for IOPL or IF testing. */
12155#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12156/** @note Not for IOPL or IF testing. */
12157#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12158/** @note Not for IOPL or IF testing. */
12159#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12160 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12161 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12162/** @note Not for IOPL or IF testing. */
12163#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12164 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12165 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12166/** @note Not for IOPL or IF testing. */
12167#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12168 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12169 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12170 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12171/** @note Not for IOPL or IF testing. */
12172#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12173 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12174 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12175 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12176#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12177#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12178#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12179/** @note Not for IOPL or IF testing. */
12180#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12181 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12182 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12183/** @note Not for IOPL or IF testing. */
12184#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12185 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12186 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12187/** @note Not for IOPL or IF testing. */
12188#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12189 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12190 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12191/** @note Not for IOPL or IF testing. */
12192#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12193 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12194 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12195/** @note Not for IOPL or IF testing. */
12196#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12197 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12198 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12199/** @note Not for IOPL or IF testing. */
12200#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12201 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12202 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12203#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12204#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12205
12206#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12207 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12208#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12209 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12210#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12211 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12212#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12213 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12214#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12215 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12216#define IEM_MC_IF_FCW_IM() \
12217 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12218
12219#define IEM_MC_ELSE() } else {
12220#define IEM_MC_ENDIF() } do {} while (0)
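/*
 * Usage note (illustrative): the IEM_MC_IF_* macros open a brace which
 * IEM_MC_ELSE/IEM_MC_ENDIF close again, so a CMOVcc-style MC block reads like
 * ordinary structured code (condensed, assumed shape):
 *
 * @code
 *  IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *      IEM_MC_FETCH_GREG_U32(u32Tmp, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK) | pVCpu->iem.s.uRexReg, u32Tmp);
 *  IEM_MC_ENDIF();
 * @endcode
 */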
12221
12222/** @} */
12223
12224
12225/** @name Opcode Debug Helpers.
12226 * @{
12227 */
12228#ifdef VBOX_WITH_STATISTICS
12229# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12230#else
12231# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12232#endif
12233
12234#ifdef DEBUG
12235# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12236 do { \
12237 IEMOP_INC_STATS(a_Stats); \
12238 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12239 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12240 } while (0)
12241
12242# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12243 do { \
12244 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12245 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12246 (void)RT_CONCAT(OP_,a_Upper); \
12247 (void)(a_fDisHints); \
12248 (void)(a_fIemHints); \
12249 } while (0)
12250
12251# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12252 do { \
12253 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12254 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12255 (void)RT_CONCAT(OP_,a_Upper); \
12256 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12257 (void)(a_fDisHints); \
12258 (void)(a_fIemHints); \
12259 } while (0)
12260
12261# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12262 do { \
12263 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12264 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12265 (void)RT_CONCAT(OP_,a_Upper); \
12266 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12267 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12268 (void)(a_fDisHints); \
12269 (void)(a_fIemHints); \
12270 } while (0)
12271
12272# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12273 do { \
12274 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12275 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12276 (void)RT_CONCAT(OP_,a_Upper); \
12277 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12278 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12279 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12280 (void)(a_fDisHints); \
12281 (void)(a_fIemHints); \
12282 } while (0)
12283
12284# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12285 do { \
12286 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12287 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12288 (void)RT_CONCAT(OP_,a_Upper); \
12289 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12290 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12291 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12292 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12293 (void)(a_fDisHints); \
12294 (void)(a_fIemHints); \
12295 } while (0)
12296
12297#else
12298# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12299
12300# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12301 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12302# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12303 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12304# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12305 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12306# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12307 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12308# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12309 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12310
12311#endif
12312
12313#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12314 IEMOP_MNEMONIC0EX(a_Lower, \
12315 #a_Lower, \
12316 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12317#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12318 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12319 #a_Lower " " #a_Op1, \
12320 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12321#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12322 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12323 #a_Lower " " #a_Op1 "," #a_Op2, \
12324 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12325#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12326 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12327 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12328 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12329#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12330 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12331 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12332 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
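/*
 * Illustrative example (assumed shape, mirroring the opcode map files): a
 * decoder function typically starts by emitting its mnemonic/statistics entry,
 * e.g.:
 *
 * @code
 *  FNIEMOP_DEF(iemOp_movups_Vps_Wps)
 *  {
 *      IEMOP_MNEMONIC2(RM, MOVUPS, movups, Vps, Wps, DISOPTYPE_HARMLESS, IEMOPHINT_IGNORES_OP_SIZE);
 *      ...
 *  }
 * @endcode
 *
 * which expands into the Log4 decode line, the per-instruction statistics bump,
 * and the compile-time IEMOPFORM_/OP_/OP_PARM_ references checked above.
 */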
12333
12334/** @} */
12335
12336
12337/** @name Opcode Helpers.
12338 * @{
12339 */
12340
12341#ifdef IN_RING3
12342# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12343 do { \
12344 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12345 else \
12346 { \
12347 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12348 return IEMOP_RAISE_INVALID_OPCODE(); \
12349 } \
12350 } while (0)
12351#else
12352# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12353 do { \
12354 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12355 else return IEMOP_RAISE_INVALID_OPCODE(); \
12356 } while (0)
12357#endif
12358
12359/** The instruction requires a 186 or later. */
12360#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12361# define IEMOP_HLP_MIN_186() do { } while (0)
12362#else
12363# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12364#endif
12365
12366/** The instruction requires a 286 or later. */
12367#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12368# define IEMOP_HLP_MIN_286() do { } while (0)
12369#else
12370# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12371#endif
12372
12373/** The instruction requires a 386 or later. */
12374#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12375# define IEMOP_HLP_MIN_386() do { } while (0)
12376#else
12377# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12378#endif
12379
12380/** The instruction requires a 386 or later if the given expression is true. */
12381#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12382# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12383#else
12384# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12385#endif
12386
12387/** The instruction requires a 486 or later. */
12388#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12389# define IEMOP_HLP_MIN_486() do { } while (0)
12390#else
12391# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12392#endif
12393
12394/** The instruction requires a Pentium (586) or later. */
12395#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12396# define IEMOP_HLP_MIN_586() do { } while (0)
12397#else
12398# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12399#endif
12400
12401/** The instruction requires a PentiumPro (686) or later. */
12402#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12403# define IEMOP_HLP_MIN_686() do { } while (0)
12404#else
12405# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12406#endif
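/*
 * Illustrative example (assumed shape): instructions introduced after the 8086
 * guard themselves with the matching IEMOP_HLP_MIN_* helper so that they decode
 * as invalid opcodes on older target CPUs, e.g.:
 *
 * @code
 *  FNIEMOP_DEF(iemOp_pusha)
 *  {
 *      IEMOP_MNEMONIC(pusha, "pusha");
 *      IEMOP_HLP_MIN_186();
 *      IEMOP_HLP_NO_64BIT();
 *      ...
 *  }
 * @endcode
 */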
12407
12408
12409/** The instruction raises an \#UD in real and V8086 mode. */
12410#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12411 do \
12412 { \
12413 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12414 else return IEMOP_RAISE_INVALID_OPCODE(); \
12415 } while (0)
12416
12417/** The instruction is not available in 64-bit mode; throws \#UD if we're in
12418 * 64-bit mode. */
12419#define IEMOP_HLP_NO_64BIT() \
12420 do \
12421 { \
12422 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12423 return IEMOP_RAISE_INVALID_OPCODE(); \
12424 } while (0)
12425
12426/** The instruction is only available in 64-bit mode; throws \#UD if we're not in
12427 * 64-bit mode. */
12428#define IEMOP_HLP_ONLY_64BIT() \
12429 do \
12430 { \
12431 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12432 return IEMOP_RAISE_INVALID_OPCODE(); \
12433 } while (0)
12434
12435/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12436#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12437 do \
12438 { \
12439 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12440 iemRecalEffOpSize64Default(pVCpu); \
12441 } while (0)
12442
12443/** The instruction has 64-bit operand size if 64-bit mode. */
12444#define IEMOP_HLP_64BIT_OP_SIZE() \
12445 do \
12446 { \
12447 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12448 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12449 } while (0)
12450
12451/** Only a REX prefix immediately preceding the first opcode byte takes
12452 * effect. This macro helps ensure this, as well as log bad guest code. */
12453#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12454 do \
12455 { \
12456 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12457 { \
12458 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12459 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12460 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12461 pVCpu->iem.s.uRexB = 0; \
12462 pVCpu->iem.s.uRexIndex = 0; \
12463 pVCpu->iem.s.uRexReg = 0; \
12464 iemRecalEffOpSize(pVCpu); \
12465 } \
12466 } while (0)
12467
12468/**
12469 * Done decoding.
12470 */
12471#define IEMOP_HLP_DONE_DECODING() \
12472 do \
12473 { \
12474 /*nothing for now, maybe later... */ \
12475 } while (0)
12476
12477/**
12478 * Done decoding, raise \#UD exception if lock prefix present.
12479 */
12480#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12481 do \
12482 { \
12483 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12484 { /* likely */ } \
12485 else \
12486 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12487 } while (0)
12488
12489
12490/**
12491 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12492 * repnz or size prefixes are present, or if in real or v8086 mode.
12493 */
12494#define IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX() \
12495 do \
12496 { \
12497 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12498 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12499 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12500 { /* likely */ } \
12501 else \
12502 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12503 } while (0)
12504
12505
12506/**
12507 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12508 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12509 * register 0, or if in real or v8086 mode.
12510 */
12511#define IEMOP_HLP_DONE_DECODING_NO_AVX_PREFIX_AND_NO_VVVV() \
12512 do \
12513 { \
12514 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12515 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12516 && !pVCpu->iem.s.uVex3rdReg \
12517 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12518 { /* likely */ } \
12519 else \
12520 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12521 } while (0)
12522
12523#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12524 do \
12525 { \
12526 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12527 { /* likely */ } \
12528 else \
12529 { \
12530 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12531 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12532 } \
12533 } while (0)
12534#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12535 do \
12536 { \
12537 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12538 { /* likely */ } \
12539 else \
12540 { \
12541 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12542 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12543 } \
12544 } while (0)
12545
12546/**
12547 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12548 * are present.
12549 */
12550#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12551 do \
12552 { \
12553 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12554 { /* likely */ } \
12555 else \
12556 return IEMOP_RAISE_INVALID_OPCODE(); \
12557 } while (0)
12558
12559
12560/**
12561 * Done decoding VEX.
12562 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, or if
12563 * we're in real or v8086 mode.
12564 */
12565#define IEMOP_HLP_DONE_VEX_DECODING() \
12566 do \
12567 { \
12568 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12569 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12570 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12571 { /* likely */ } \
12572 else \
12573 return IEMOP_RAISE_INVALID_OPCODE(); \
12574 } while (0)
12575
12576/**
12577 * Done decoding VEX, no V, no L.
12578 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12579 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12580 */
12581#define IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV() \
12582 do \
12583 { \
12584 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12585 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12586 && pVCpu->iem.s.uVexLength == 0 \
12587 && pVCpu->iem.s.uVex3rdReg == 0 \
12588 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12589 { /* likely */ } \
12590 else \
12591 return IEMOP_RAISE_INVALID_OPCODE(); \
12592 } while (0)
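/*
 * Illustrative sketch (assumed shape of a VEX.L0 decoder with an unused VVVV
 * field, e.g. the memory forms of the scalar moves): this helper is paired with
 * the AVX state macros above, roughly:
 *
 * @code
 *  IEM_MC_BEGIN(0, 2);
 *  IEM_MC_LOCAL(uint64_t, uSrc);
 *  IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
 *  IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *  IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV();
 *  ... (AVX exception checks elided) ...
 *  IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE();
 *  IEM_MC_FETCH_MEM_U64(uSrc, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *  ...
 *  IEM_MC_ADVANCE_RIP();
 *  IEM_MC_END();
 * @endcode
 */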
12593
12594#ifdef VBOX_WITH_NESTED_HWVIRT
12595/** Check and handle SVM nested-guest control & instruction intercept. */
12596# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12597 do \
12598 { \
12599 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12600 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12601 } while (0)
12602
12603/** Check and handle SVM nested-guest CR0 read intercept. */
12604# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12605 do \
12606 { \
12607 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12608 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12609 } while (0)
12610
12611#else
12612# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12613# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12614
12615#endif /* VBOX_WITH_NESTED_HWVIRT */
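/*
 * Illustrative use (assumed shape; the SVM_* constants come from the SVM
 * headers and the real call sites live in the instruction/C-impl files): an
 * instruction that can be intercepted by an SVM nested guest checks the
 * intercept before committing any guest-visible state, e.g.:
 *
 * @code
 *  IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC, 0, 0);
 * @endcode
 *
 * so that the nested-guest \#VMEXIT takes precedence over emulating the
 * instruction.
 */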
12616
12617
12618/**
12619 * Calculates the effective address of a ModR/M memory operand.
12620 *
12621 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12622 *
12623 * @return Strict VBox status code.
12624 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12625 * @param bRm The ModRM byte.
12626 * @param cbImm The size of any immediate following the
12627 * effective address opcode bytes. Important for
12628 * RIP relative addressing.
12629 * @param pGCPtrEff Where to return the effective address.
12630 */
12631IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12632{
12633 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12634 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12635# define SET_SS_DEF() \
12636 do \
12637 { \
12638 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12639 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12640 } while (0)
12641
12642 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12643 {
12644/** @todo Check the effective address size crap! */
12645 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12646 {
12647 uint16_t u16EffAddr;
12648
12649 /* Handle the disp16 form with no registers first. */
12650 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12651 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12652 else
12653 {
12654                /* Get the displacement. */
12655 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12656 {
12657 case 0: u16EffAddr = 0; break;
12658 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12659 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12660 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12661 }
12662
12663 /* Add the base and index registers to the disp. */
12664 switch (bRm & X86_MODRM_RM_MASK)
12665 {
12666 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12667 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12668 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12669 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12670 case 4: u16EffAddr += pCtx->si; break;
12671 case 5: u16EffAddr += pCtx->di; break;
12672 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12673 case 7: u16EffAddr += pCtx->bx; break;
12674 }
12675 }
12676
12677 *pGCPtrEff = u16EffAddr;
12678 }
12679 else
12680 {
12681 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12682 uint32_t u32EffAddr;
12683
12684 /* Handle the disp32 form with no registers first. */
12685 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12686 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12687 else
12688 {
12689 /* Get the register (or SIB) value. */
12690 switch ((bRm & X86_MODRM_RM_MASK))
12691 {
12692 case 0: u32EffAddr = pCtx->eax; break;
12693 case 1: u32EffAddr = pCtx->ecx; break;
12694 case 2: u32EffAddr = pCtx->edx; break;
12695 case 3: u32EffAddr = pCtx->ebx; break;
12696 case 4: /* SIB */
12697 {
12698 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12699
12700 /* Get the index and scale it. */
12701 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12702 {
12703 case 0: u32EffAddr = pCtx->eax; break;
12704 case 1: u32EffAddr = pCtx->ecx; break;
12705 case 2: u32EffAddr = pCtx->edx; break;
12706 case 3: u32EffAddr = pCtx->ebx; break;
12707 case 4: u32EffAddr = 0; /*none */ break;
12708 case 5: u32EffAddr = pCtx->ebp; break;
12709 case 6: u32EffAddr = pCtx->esi; break;
12710 case 7: u32EffAddr = pCtx->edi; break;
12711 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12712 }
12713 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12714
12715 /* add base */
12716 switch (bSib & X86_SIB_BASE_MASK)
12717 {
12718 case 0: u32EffAddr += pCtx->eax; break;
12719 case 1: u32EffAddr += pCtx->ecx; break;
12720 case 2: u32EffAddr += pCtx->edx; break;
12721 case 3: u32EffAddr += pCtx->ebx; break;
12722 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12723 case 5:
12724 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12725 {
12726 u32EffAddr += pCtx->ebp;
12727 SET_SS_DEF();
12728 }
12729 else
12730 {
12731 uint32_t u32Disp;
12732 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12733 u32EffAddr += u32Disp;
12734 }
12735 break;
12736 case 6: u32EffAddr += pCtx->esi; break;
12737 case 7: u32EffAddr += pCtx->edi; break;
12738 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12739 }
12740 break;
12741 }
12742 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12743 case 6: u32EffAddr = pCtx->esi; break;
12744 case 7: u32EffAddr = pCtx->edi; break;
12745 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12746 }
12747
12748 /* Get and add the displacement. */
12749 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12750 {
12751 case 0:
12752 break;
12753 case 1:
12754 {
12755 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12756 u32EffAddr += i8Disp;
12757 break;
12758 }
12759 case 2:
12760 {
12761 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12762 u32EffAddr += u32Disp;
12763 break;
12764 }
12765 default:
12766 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12767 }
12768
12769 }
12770 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12771 *pGCPtrEff = u32EffAddr;
12772 else
12773 {
12774 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12775 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12776 }
12777 }
12778 }
12779 else
12780 {
12781 uint64_t u64EffAddr;
12782
12783 /* Handle the rip+disp32 form with no registers first. */
12784 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12785 {
12786 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12787 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12788 }
12789 else
12790 {
12791 /* Get the register (or SIB) value. */
12792 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12793 {
12794 case 0: u64EffAddr = pCtx->rax; break;
12795 case 1: u64EffAddr = pCtx->rcx; break;
12796 case 2: u64EffAddr = pCtx->rdx; break;
12797 case 3: u64EffAddr = pCtx->rbx; break;
12798 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12799 case 6: u64EffAddr = pCtx->rsi; break;
12800 case 7: u64EffAddr = pCtx->rdi; break;
12801 case 8: u64EffAddr = pCtx->r8; break;
12802 case 9: u64EffAddr = pCtx->r9; break;
12803 case 10: u64EffAddr = pCtx->r10; break;
12804 case 11: u64EffAddr = pCtx->r11; break;
12805 case 13: u64EffAddr = pCtx->r13; break;
12806 case 14: u64EffAddr = pCtx->r14; break;
12807 case 15: u64EffAddr = pCtx->r15; break;
12808 /* SIB */
12809 case 4:
12810 case 12:
12811 {
12812 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12813
12814 /* Get the index and scale it. */
12815 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12816 {
12817 case 0: u64EffAddr = pCtx->rax; break;
12818 case 1: u64EffAddr = pCtx->rcx; break;
12819 case 2: u64EffAddr = pCtx->rdx; break;
12820 case 3: u64EffAddr = pCtx->rbx; break;
12821 case 4: u64EffAddr = 0; /*none */ break;
12822 case 5: u64EffAddr = pCtx->rbp; break;
12823 case 6: u64EffAddr = pCtx->rsi; break;
12824 case 7: u64EffAddr = pCtx->rdi; break;
12825 case 8: u64EffAddr = pCtx->r8; break;
12826 case 9: u64EffAddr = pCtx->r9; break;
12827 case 10: u64EffAddr = pCtx->r10; break;
12828 case 11: u64EffAddr = pCtx->r11; break;
12829 case 12: u64EffAddr = pCtx->r12; break;
12830 case 13: u64EffAddr = pCtx->r13; break;
12831 case 14: u64EffAddr = pCtx->r14; break;
12832 case 15: u64EffAddr = pCtx->r15; break;
12833 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12834 }
12835 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12836
12837 /* add base */
12838 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12839 {
12840 case 0: u64EffAddr += pCtx->rax; break;
12841 case 1: u64EffAddr += pCtx->rcx; break;
12842 case 2: u64EffAddr += pCtx->rdx; break;
12843 case 3: u64EffAddr += pCtx->rbx; break;
12844 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12845 case 6: u64EffAddr += pCtx->rsi; break;
12846 case 7: u64EffAddr += pCtx->rdi; break;
12847 case 8: u64EffAddr += pCtx->r8; break;
12848 case 9: u64EffAddr += pCtx->r9; break;
12849 case 10: u64EffAddr += pCtx->r10; break;
12850 case 11: u64EffAddr += pCtx->r11; break;
12851 case 12: u64EffAddr += pCtx->r12; break;
12852 case 14: u64EffAddr += pCtx->r14; break;
12853 case 15: u64EffAddr += pCtx->r15; break;
12854 /* complicated encodings */
12855 case 5:
12856 case 13:
12857 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12858 {
12859 if (!pVCpu->iem.s.uRexB)
12860 {
12861 u64EffAddr += pCtx->rbp;
12862 SET_SS_DEF();
12863 }
12864 else
12865 u64EffAddr += pCtx->r13;
12866 }
12867 else
12868 {
12869 uint32_t u32Disp;
12870 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12871 u64EffAddr += (int32_t)u32Disp;
12872 }
12873 break;
12874 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12875 }
12876 break;
12877 }
12878 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12879 }
12880
12881 /* Get and add the displacement. */
12882 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12883 {
12884 case 0:
12885 break;
12886 case 1:
12887 {
12888 int8_t i8Disp;
12889 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12890 u64EffAddr += i8Disp;
12891 break;
12892 }
12893 case 2:
12894 {
12895 uint32_t u32Disp;
12896 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12897 u64EffAddr += (int32_t)u32Disp;
12898 break;
12899 }
12900 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12901 }
12902
12903 }
12904
12905 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12906 *pGCPtrEff = u64EffAddr;
12907 else
12908 {
12909 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12910 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12911 }
12912 }
12913
12914 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12915 return VINF_SUCCESS;
12916}
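/*
 * Worked examples (derived from the tables above, for reference):
 *  - 16-bit addressing, bRm=0x46: mod=1, rm=6 -> BP + disp8, with SET_SS_DEF()
 *    making SS the default segment.
 *  - 32-bit addressing, bRm=0x44, SIB=0x24: mod=1, rm=4 selects the SIB byte;
 *    scale=0, index=4 (none), base=4 (ESP) -> ESP + disp8, again with the SS
 *    default.
 *  - 64-bit addressing, bRm=0x05: mod=0, rm=5 -> RIP-relative, i.e. disp32
 *    added to the address of the next instruction, which is why cbImm (the
 *    size of any trailing immediate) must be passed in.
 */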
12917
12918
12919/**
12920 * Calculates the effective address of a ModR/M memory operand.
12921 *
12922 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12923 *
12924 * @return Strict VBox status code.
12925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12926 * @param bRm The ModRM byte.
12927 * @param cbImm The size of any immediate following the
12928 * effective address opcode bytes. Important for
12929 * RIP relative addressing.
12930 * @param pGCPtrEff Where to return the effective address.
12931 * @param offRsp RSP displacement.
12932 */
12933IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
12934{
12935    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
12936 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12937# define SET_SS_DEF() \
12938 do \
12939 { \
12940 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12941 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12942 } while (0)
12943
12944 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12945 {
12946/** @todo Check the effective address size crap! */
12947 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12948 {
12949 uint16_t u16EffAddr;
12950
12951 /* Handle the disp16 form with no registers first. */
12952 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12953 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12954 else
12955 {
12956                /* Get the displacement. */
12957 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12958 {
12959 case 0: u16EffAddr = 0; break;
12960 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12961 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12962 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12963 }
12964
12965 /* Add the base and index registers to the disp. */
12966 switch (bRm & X86_MODRM_RM_MASK)
12967 {
12968 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12969 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12970 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12971 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12972 case 4: u16EffAddr += pCtx->si; break;
12973 case 5: u16EffAddr += pCtx->di; break;
12974 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12975 case 7: u16EffAddr += pCtx->bx; break;
12976 }
12977 }
12978
12979 *pGCPtrEff = u16EffAddr;
12980 }
12981 else
12982 {
12983 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12984 uint32_t u32EffAddr;
12985
12986 /* Handle the disp32 form with no registers first. */
12987 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12988 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12989 else
12990 {
12991 /* Get the register (or SIB) value. */
12992 switch ((bRm & X86_MODRM_RM_MASK))
12993 {
12994 case 0: u32EffAddr = pCtx->eax; break;
12995 case 1: u32EffAddr = pCtx->ecx; break;
12996 case 2: u32EffAddr = pCtx->edx; break;
12997 case 3: u32EffAddr = pCtx->ebx; break;
12998 case 4: /* SIB */
12999 {
13000 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13001
13002 /* Get the index and scale it. */
13003 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13004 {
13005 case 0: u32EffAddr = pCtx->eax; break;
13006 case 1: u32EffAddr = pCtx->ecx; break;
13007 case 2: u32EffAddr = pCtx->edx; break;
13008 case 3: u32EffAddr = pCtx->ebx; break;
13009 case 4: u32EffAddr = 0; /*none */ break;
13010 case 5: u32EffAddr = pCtx->ebp; break;
13011 case 6: u32EffAddr = pCtx->esi; break;
13012 case 7: u32EffAddr = pCtx->edi; break;
13013 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13014 }
13015 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13016
13017 /* add base */
13018 switch (bSib & X86_SIB_BASE_MASK)
13019 {
13020 case 0: u32EffAddr += pCtx->eax; break;
13021 case 1: u32EffAddr += pCtx->ecx; break;
13022 case 2: u32EffAddr += pCtx->edx; break;
13023 case 3: u32EffAddr += pCtx->ebx; break;
13024 case 4:
13025 u32EffAddr += pCtx->esp + offRsp;
13026 SET_SS_DEF();
13027 break;
13028 case 5:
13029 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13030 {
13031 u32EffAddr += pCtx->ebp;
13032 SET_SS_DEF();
13033 }
13034 else
13035 {
13036 uint32_t u32Disp;
13037 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13038 u32EffAddr += u32Disp;
13039 }
13040 break;
13041 case 6: u32EffAddr += pCtx->esi; break;
13042 case 7: u32EffAddr += pCtx->edi; break;
13043 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13044 }
13045 break;
13046 }
13047 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13048 case 6: u32EffAddr = pCtx->esi; break;
13049 case 7: u32EffAddr = pCtx->edi; break;
13050 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13051 }
13052
13053 /* Get and add the displacement. */
13054 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13055 {
13056 case 0:
13057 break;
13058 case 1:
13059 {
13060 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13061 u32EffAddr += i8Disp;
13062 break;
13063 }
13064 case 2:
13065 {
13066 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13067 u32EffAddr += u32Disp;
13068 break;
13069 }
13070 default:
13071 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13072 }
13073
13074 }
13075 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13076 *pGCPtrEff = u32EffAddr;
13077 else
13078 {
13079 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13080 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13081 }
13082 }
13083 }
13084 else
13085 {
13086 uint64_t u64EffAddr;
13087
13088 /* Handle the rip+disp32 form with no registers first. */
13089 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13090 {
13091 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13092 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13093 }
13094 else
13095 {
13096 /* Get the register (or SIB) value. */
13097 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13098 {
13099 case 0: u64EffAddr = pCtx->rax; break;
13100 case 1: u64EffAddr = pCtx->rcx; break;
13101 case 2: u64EffAddr = pCtx->rdx; break;
13102 case 3: u64EffAddr = pCtx->rbx; break;
13103 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13104 case 6: u64EffAddr = pCtx->rsi; break;
13105 case 7: u64EffAddr = pCtx->rdi; break;
13106 case 8: u64EffAddr = pCtx->r8; break;
13107 case 9: u64EffAddr = pCtx->r9; break;
13108 case 10: u64EffAddr = pCtx->r10; break;
13109 case 11: u64EffAddr = pCtx->r11; break;
13110 case 13: u64EffAddr = pCtx->r13; break;
13111 case 14: u64EffAddr = pCtx->r14; break;
13112 case 15: u64EffAddr = pCtx->r15; break;
13113 /* SIB */
13114 case 4:
13115 case 12:
13116 {
13117 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13118
13119 /* Get the index and scale it. */
13120 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13121 {
13122 case 0: u64EffAddr = pCtx->rax; break;
13123 case 1: u64EffAddr = pCtx->rcx; break;
13124 case 2: u64EffAddr = pCtx->rdx; break;
13125 case 3: u64EffAddr = pCtx->rbx; break;
13126 case 4: u64EffAddr = 0; /*none */ break;
13127 case 5: u64EffAddr = pCtx->rbp; break;
13128 case 6: u64EffAddr = pCtx->rsi; break;
13129 case 7: u64EffAddr = pCtx->rdi; break;
13130 case 8: u64EffAddr = pCtx->r8; break;
13131 case 9: u64EffAddr = pCtx->r9; break;
13132 case 10: u64EffAddr = pCtx->r10; break;
13133 case 11: u64EffAddr = pCtx->r11; break;
13134 case 12: u64EffAddr = pCtx->r12; break;
13135 case 13: u64EffAddr = pCtx->r13; break;
13136 case 14: u64EffAddr = pCtx->r14; break;
13137 case 15: u64EffAddr = pCtx->r15; break;
13138 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13139 }
13140 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13141
13142 /* add base */
13143 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13144 {
13145 case 0: u64EffAddr += pCtx->rax; break;
13146 case 1: u64EffAddr += pCtx->rcx; break;
13147 case 2: u64EffAddr += pCtx->rdx; break;
13148 case 3: u64EffAddr += pCtx->rbx; break;
13149 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13150 case 6: u64EffAddr += pCtx->rsi; break;
13151 case 7: u64EffAddr += pCtx->rdi; break;
13152 case 8: u64EffAddr += pCtx->r8; break;
13153 case 9: u64EffAddr += pCtx->r9; break;
13154 case 10: u64EffAddr += pCtx->r10; break;
13155 case 11: u64EffAddr += pCtx->r11; break;
13156 case 12: u64EffAddr += pCtx->r12; break;
13157 case 14: u64EffAddr += pCtx->r14; break;
13158 case 15: u64EffAddr += pCtx->r15; break;
13159 /* complicated encodings */
13160 case 5:
13161 case 13:
13162 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13163 {
13164 if (!pVCpu->iem.s.uRexB)
13165 {
13166 u64EffAddr += pCtx->rbp;
13167 SET_SS_DEF();
13168 }
13169 else
13170 u64EffAddr += pCtx->r13;
13171 }
13172 else
13173 {
13174 uint32_t u32Disp;
13175 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13176 u64EffAddr += (int32_t)u32Disp;
13177 }
13178 break;
13179 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13180 }
13181 break;
13182 }
13183 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13184 }
13185
13186 /* Get and add the displacement. */
13187 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13188 {
13189 case 0:
13190 break;
13191 case 1:
13192 {
13193 int8_t i8Disp;
13194 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13195 u64EffAddr += i8Disp;
13196 break;
13197 }
13198 case 2:
13199 {
13200 uint32_t u32Disp;
13201 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13202 u64EffAddr += (int32_t)u32Disp;
13203 break;
13204 }
13205 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13206 }
13207
13208 }
13209
13210 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13211 *pGCPtrEff = u64EffAddr;
13212 else
13213 {
13214 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13215 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13216 }
13217 }
13218
13219    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13220 return VINF_SUCCESS;
13221}
13222
13223
13224#ifdef IEM_WITH_SETJMP
13225/**
13226 * Calculates the effective address of a ModR/M memory operand.
13227 *
13228 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13229 *
13230 * May longjmp on internal error.
13231 *
13232 * @return The effective address.
13233 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13234 * @param bRm The ModRM byte.
13235 * @param cbImm The size of any immediate following the
13236 * effective address opcode bytes. Important for
13237 * RIP relative addressing.
13238 */
13239IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13240{
13241 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13242 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13243# define SET_SS_DEF() \
13244 do \
13245 { \
13246 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13247 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13248 } while (0)
13249
13250 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13251 {
13252/** @todo Check the effective address size crap! */
13253 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13254 {
13255 uint16_t u16EffAddr;
13256
13257 /* Handle the disp16 form with no registers first. */
13258 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13259 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13260 else
13261 {
13262                /* Get the displacement. */
13263 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13264 {
13265 case 0: u16EffAddr = 0; break;
13266 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13267 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13268 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13269 }
13270
13271 /* Add the base and index registers to the disp. */
13272 switch (bRm & X86_MODRM_RM_MASK)
13273 {
13274 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13275 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13276 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13277 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13278 case 4: u16EffAddr += pCtx->si; break;
13279 case 5: u16EffAddr += pCtx->di; break;
13280 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13281 case 7: u16EffAddr += pCtx->bx; break;
13282 }
13283 }
13284
13285 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13286 return u16EffAddr;
13287 }
13288
13289 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13290 uint32_t u32EffAddr;
13291
13292 /* Handle the disp32 form with no registers first. */
13293 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13294 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13295 else
13296 {
13297 /* Get the register (or SIB) value. */
13298 switch ((bRm & X86_MODRM_RM_MASK))
13299 {
13300 case 0: u32EffAddr = pCtx->eax; break;
13301 case 1: u32EffAddr = pCtx->ecx; break;
13302 case 2: u32EffAddr = pCtx->edx; break;
13303 case 3: u32EffAddr = pCtx->ebx; break;
13304 case 4: /* SIB */
13305 {
13306 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13307
13308 /* Get the index and scale it. */
13309 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13310 {
13311 case 0: u32EffAddr = pCtx->eax; break;
13312 case 1: u32EffAddr = pCtx->ecx; break;
13313 case 2: u32EffAddr = pCtx->edx; break;
13314 case 3: u32EffAddr = pCtx->ebx; break;
13315 case 4: u32EffAddr = 0; /*none */ break;
13316 case 5: u32EffAddr = pCtx->ebp; break;
13317 case 6: u32EffAddr = pCtx->esi; break;
13318 case 7: u32EffAddr = pCtx->edi; break;
13319 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13320 }
13321 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13322
13323 /* add base */
13324 switch (bSib & X86_SIB_BASE_MASK)
13325 {
13326 case 0: u32EffAddr += pCtx->eax; break;
13327 case 1: u32EffAddr += pCtx->ecx; break;
13328 case 2: u32EffAddr += pCtx->edx; break;
13329 case 3: u32EffAddr += pCtx->ebx; break;
13330 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13331 case 5:
13332 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13333 {
13334 u32EffAddr += pCtx->ebp;
13335 SET_SS_DEF();
13336 }
13337 else
13338 {
13339 uint32_t u32Disp;
13340 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13341 u32EffAddr += u32Disp;
13342 }
13343 break;
13344 case 6: u32EffAddr += pCtx->esi; break;
13345 case 7: u32EffAddr += pCtx->edi; break;
13346 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13347 }
13348 break;
13349 }
13350 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13351 case 6: u32EffAddr = pCtx->esi; break;
13352 case 7: u32EffAddr = pCtx->edi; break;
13353 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13354 }
13355
13356 /* Get and add the displacement. */
13357 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13358 {
13359 case 0:
13360 break;
13361 case 1:
13362 {
13363 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13364 u32EffAddr += i8Disp;
13365 break;
13366 }
13367 case 2:
13368 {
13369 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13370 u32EffAddr += u32Disp;
13371 break;
13372 }
13373 default:
13374 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13375 }
13376 }
13377
13378 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13379 {
13380 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13381 return u32EffAddr;
13382 }
13383 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13384 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13385 return u32EffAddr & UINT16_MAX;
13386 }
13387
13388 uint64_t u64EffAddr;
13389
13390 /* Handle the rip+disp32 form with no registers first. */
13391 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13392 {
13393 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13394 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13395 }
13396 else
13397 {
13398 /* Get the register (or SIB) value. */
13399 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13400 {
13401 case 0: u64EffAddr = pCtx->rax; break;
13402 case 1: u64EffAddr = pCtx->rcx; break;
13403 case 2: u64EffAddr = pCtx->rdx; break;
13404 case 3: u64EffAddr = pCtx->rbx; break;
13405 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13406 case 6: u64EffAddr = pCtx->rsi; break;
13407 case 7: u64EffAddr = pCtx->rdi; break;
13408 case 8: u64EffAddr = pCtx->r8; break;
13409 case 9: u64EffAddr = pCtx->r9; break;
13410 case 10: u64EffAddr = pCtx->r10; break;
13411 case 11: u64EffAddr = pCtx->r11; break;
13412 case 13: u64EffAddr = pCtx->r13; break;
13413 case 14: u64EffAddr = pCtx->r14; break;
13414 case 15: u64EffAddr = pCtx->r15; break;
13415 /* SIB */
13416 case 4:
13417 case 12:
13418 {
13419 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13420
13421 /* Get the index and scale it. */
13422 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13423 {
13424 case 0: u64EffAddr = pCtx->rax; break;
13425 case 1: u64EffAddr = pCtx->rcx; break;
13426 case 2: u64EffAddr = pCtx->rdx; break;
13427 case 3: u64EffAddr = pCtx->rbx; break;
13428 case 4: u64EffAddr = 0; /*none */ break;
13429 case 5: u64EffAddr = pCtx->rbp; break;
13430 case 6: u64EffAddr = pCtx->rsi; break;
13431 case 7: u64EffAddr = pCtx->rdi; break;
13432 case 8: u64EffAddr = pCtx->r8; break;
13433 case 9: u64EffAddr = pCtx->r9; break;
13434 case 10: u64EffAddr = pCtx->r10; break;
13435 case 11: u64EffAddr = pCtx->r11; break;
13436 case 12: u64EffAddr = pCtx->r12; break;
13437 case 13: u64EffAddr = pCtx->r13; break;
13438 case 14: u64EffAddr = pCtx->r14; break;
13439 case 15: u64EffAddr = pCtx->r15; break;
13440 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13441 }
13442 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13443
13444 /* add base */
13445 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13446 {
13447 case 0: u64EffAddr += pCtx->rax; break;
13448 case 1: u64EffAddr += pCtx->rcx; break;
13449 case 2: u64EffAddr += pCtx->rdx; break;
13450 case 3: u64EffAddr += pCtx->rbx; break;
13451 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13452 case 6: u64EffAddr += pCtx->rsi; break;
13453 case 7: u64EffAddr += pCtx->rdi; break;
13454 case 8: u64EffAddr += pCtx->r8; break;
13455 case 9: u64EffAddr += pCtx->r9; break;
13456 case 10: u64EffAddr += pCtx->r10; break;
13457 case 11: u64EffAddr += pCtx->r11; break;
13458 case 12: u64EffAddr += pCtx->r12; break;
13459 case 14: u64EffAddr += pCtx->r14; break;
13460 case 15: u64EffAddr += pCtx->r15; break;
13461 /* complicated encodings */
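 /* Base register 5/13 (rBP/r13) is special: with mod=0 there is no base and a
    disp32 is read instead; otherwise rBP/r13 is used, with SS becoming the
    default segment in the rBP case. */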
13462 case 5:
13463 case 13:
13464 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13465 {
13466 if (!pVCpu->iem.s.uRexB)
13467 {
13468 u64EffAddr += pCtx->rbp;
13469 SET_SS_DEF();
13470 }
13471 else
13472 u64EffAddr += pCtx->r13;
13473 }
13474 else
13475 {
13476 uint32_t u32Disp;
13477 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13478 u64EffAddr += (int32_t)u32Disp;
13479 }
13480 break;
13481 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13482 }
13483 break;
13484 }
13485 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13486 }
13487
13488 /* Get and add the displacement. */
13489 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13490 {
13491 case 0:
13492 break;
13493 case 1:
13494 {
13495 int8_t i8Disp;
13496 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13497 u64EffAddr += i8Disp;
13498 break;
13499 }
13500 case 2:
13501 {
13502 uint32_t u32Disp;
13503 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13504 u64EffAddr += (int32_t)u32Disp;
13505 break;
13506 }
13507 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13508 }
13509
13510 }
13511
13512 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13513 {
13514 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13515 return u64EffAddr;
13516 }
13517 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13518 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13519 return u64EffAddr & UINT32_MAX;
13520}
13521#endif /* IEM_WITH_SETJMP */
13522
13523
13524/** @} */
13525
13526
13527
13528/*
13529 * Include the instructions
13530 */
13531#include "IEMAllInstructions.cpp.h"
13532
13533
13534
13535
13536#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13537
13538/**
13539 * Sets up execution verification mode.
13540 */
13541IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13542{
13544 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13545
13546 /*
13547 * Always note down the address of the current instruction.
13548 */
13549 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13550 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13551
13552 /*
13553 * Enable verification and/or logging.
13554 */
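 /* Note the double negative: fNewNoRem==true means we run without the REM/HM
    cross-checking, i.e. verification is off. Enabling log group 6 (or one of
    the triggers below) switches verification back on. */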
13555 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13556 if ( fNewNoRem
13557 && ( 0
13558#if 0 /* auto enable on first paged protected mode interrupt */
13559 || ( pOrgCtx->eflags.Bits.u1IF
13560 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13561 && TRPMHasTrap(pVCpu)
13562 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13563#endif
13564#if 0
13565 || ( pOrgCtx->cs.Sel == 0x10
13566 && ( pOrgCtx->rip == 0x90119e3e
13567 || pOrgCtx->rip == 0x901d9810))
13568#endif
13569#if 0 /* Auto enable DSL - FPU stuff. */
13570 || ( pOrgCtx->cs.Sel == 0x10
13571 && (// pOrgCtx->rip == 0xc02ec07f
13572 //|| pOrgCtx->rip == 0xc02ec082
13573 //|| pOrgCtx->rip == 0xc02ec0c9
13574 0
13575 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13576#endif
13577#if 0 /* Auto enable DSL - fstp st0 stuff. */
13578 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13579#endif
13580#if 0
13581 || pOrgCtx->rip == 0x9022bb3a
13582#endif
13583#if 0
13584 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13585#endif
13586#if 0
13587 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13588 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13589#endif
13590#if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */
13591 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13592 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13593 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13594#endif
13595#if 0 /* NT4SP1 - xadd early boot. */
13596 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13597#endif
13598#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13599 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13600#endif
13601#if 0 /* NT4SP1 - cmpxchg (AMD). */
13602 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13603#endif
13604#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13605 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13606#endif
13607#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13608 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13609
13610#endif
13611#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13612 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13613
13614#endif
13615#if 0 /* NT4SP1 - frstor [ecx] */
13616 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13617#endif
13618#if 0 /* xxxxxx - All long mode code. */
13619 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13620#endif
13621#if 0 /* rep movsq linux 3.7 64-bit boot. */
13622 || (pOrgCtx->rip == 0x0000000000100241)
13623#endif
13624#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13625 || (pOrgCtx->rip == 0x000000000215e240)
13626#endif
13627#if 0 /* DOS's size-overridden iret to v8086. */
13628 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13629#endif
13630 )
13631 )
13632 {
13633 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13634 RTLogFlags(NULL, "enabled");
13635 fNewNoRem = false;
13636 }
13637 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13638 {
13639 pVCpu->iem.s.fNoRem = fNewNoRem;
13640 if (!fNewNoRem)
13641 {
13642 LogAlways(("Enabling verification mode!\n"));
13643 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13644 }
13645 else
13646 LogAlways(("Disabling verification mode!\n"));
13647 }
13648
13649 /*
13650 * Switch state.
13651 */
13652 if (IEM_VERIFICATION_ENABLED(pVCpu))
13653 {
13654 static CPUMCTX s_DebugCtx; /* Ugly! */
13655
13656 s_DebugCtx = *pOrgCtx;
13657 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13658 }
13659
13660 /*
13661 * See if there is an interrupt pending in TRPM and inject it if we can.
13662 */
13663 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13664 if ( pOrgCtx->eflags.Bits.u1IF
13665 && TRPMHasTrap(pVCpu)
13666 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13667 {
13668 uint8_t u8TrapNo;
13669 TRPMEVENT enmType;
13670 RTGCUINT uErrCode;
13671 RTGCPTR uCr2;
13672 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13673 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13674 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13675 TRPMResetTrap(pVCpu);
13676 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13677 }
13678
13679 /*
13680 * Reset the counters.
13681 */
13682 pVCpu->iem.s.cIOReads = 0;
13683 pVCpu->iem.s.cIOWrites = 0;
13684 pVCpu->iem.s.fIgnoreRaxRdx = false;
13685 pVCpu->iem.s.fOverlappingMovs = false;
13686 pVCpu->iem.s.fProblematicMemory = false;
13687 pVCpu->iem.s.fUndefinedEFlags = 0;
13688
13689 if (IEM_VERIFICATION_ENABLED(pVCpu))
13690 {
13691 /*
13692 * Free all verification records.
13693 */
13694 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13695 pVCpu->iem.s.pIemEvtRecHead = NULL;
13696 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13697 do
13698 {
13699 while (pEvtRec)
13700 {
13701 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13702 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13703 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13704 pEvtRec = pNext;
13705 }
13706 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13707 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13708 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13709 } while (pEvtRec);
13710 }
13711}
13712
13713
13714/**
13715 * Allocate an event record.
13716 * @returns Pointer to a record.
13717 */
13718IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13719{
13720 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13721 return NULL;
13722
13723 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13724 if (pEvtRec)
13725 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13726 else
13727 {
13728 if (!pVCpu->iem.s.ppIemEvtRecNext)
13729 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13730
13731 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13732 if (!pEvtRec)
13733 return NULL;
13734 }
13735 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13736 pEvtRec->pNext = NULL;
13737 return pEvtRec;
13738}
13739
13740
13741/**
13742 * IOMMMIORead notification.
13743 */
13744VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
13745{
13746 PVMCPU pVCpu = VMMGetCpu(pVM);
13747 if (!pVCpu)
13748 return;
13749 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13750 if (!pEvtRec)
13751 return;
13752 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
13753 pEvtRec->u.RamRead.GCPhys = GCPhys;
13754 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
13755 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13756 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13757}
13758
13759
13760/**
13761 * IOMMMIOWrite notification.
13762 */
13763VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
13764{
13765 PVMCPU pVCpu = VMMGetCpu(pVM);
13766 if (!pVCpu)
13767 return;
13768 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13769 if (!pEvtRec)
13770 return;
13771 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
13772 pEvtRec->u.RamWrite.GCPhys = GCPhys;
13773 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
13774 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
13775 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
13776 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
13777 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
13778 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13779 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13780}
13781
13782
13783/**
13784 * IOMIOPortRead notification.
13785 */
13786VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
13787{
13788 PVMCPU pVCpu = VMMGetCpu(pVM);
13789 if (!pVCpu)
13790 return;
13791 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13792 if (!pEvtRec)
13793 return;
13794 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13795 pEvtRec->u.IOPortRead.Port = Port;
13796 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13797 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13798 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13799}
13800
13801/**
13802 * IOMIOPortWrite notification.
13803 */
13804VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13805{
13806 PVMCPU pVCpu = VMMGetCpu(pVM);
13807 if (!pVCpu)
13808 return;
13809 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13810 if (!pEvtRec)
13811 return;
13812 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13813 pEvtRec->u.IOPortWrite.Port = Port;
13814 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13815 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13816 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13817 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13818}
13819
13820
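/**
 * IOMIOPortReadString notification.
 */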
13821VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
13822{
13823 PVMCPU pVCpu = VMMGetCpu(pVM);
13824 if (!pVCpu)
13825 return;
13826 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13827 if (!pEvtRec)
13828 return;
13829 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
13830 pEvtRec->u.IOPortStrRead.Port = Port;
13831 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
13832 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
13833 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13834 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13835}
13836
13837
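/**
 * IOMIOPortWriteString notification.
 */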
13838VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
13839{
13840 PVMCPU pVCpu = VMMGetCpu(pVM);
13841 if (!pVCpu)
13842 return;
13843 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13844 if (!pEvtRec)
13845 return;
13846 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
13847 pEvtRec->u.IOPortStrWrite.Port = Port;
13848 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
13849 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
13850 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13851 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13852}
13853
13854
13855/**
13856 * Fakes and records an I/O port read.
13857 *
13858 * @returns VINF_SUCCESS.
13859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13860 * @param Port The I/O port.
13861 * @param pu32Value Where to store the fake value.
13862 * @param cbValue The size of the access.
13863 */
13864IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13865{
13866 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13867 if (pEvtRec)
13868 {
13869 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13870 pEvtRec->u.IOPortRead.Port = Port;
13871 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13872 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13873 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13874 }
13875 pVCpu->iem.s.cIOReads++;
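 /* Fake the value with a recognizable pattern; iemVerifyWriteRecord later
    ignores small writes of 0xcc bytes stemming from such faked INS input. */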
13876 *pu32Value = 0xcccccccc;
13877 return VINF_SUCCESS;
13878}
13879
13880
13881/**
13882 * Fakes and records an I/O port write.
13883 *
13884 * @returns VINF_SUCCESS.
13885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13886 * @param Port The I/O port.
13887 * @param u32Value The value being written.
13888 * @param cbValue The size of the access.
13889 */
13890IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13891{
13892 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13893 if (pEvtRec)
13894 {
13895 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13896 pEvtRec->u.IOPortWrite.Port = Port;
13897 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13898 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13899 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13900 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13901 }
13902 pVCpu->iem.s.cIOWrites++;
13903 return VINF_SUCCESS;
13904}
13905
13906
13907/**
13908 * Used to add extra details about a stub case.
13909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13910 */
13911IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
13912{
13913 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13914 PVM pVM = pVCpu->CTX_SUFF(pVM);
13916 char szRegs[4096];
13917 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
13918 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
13919 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
13920 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
13921 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
13922 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
13923 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
13924 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
13925 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
13926 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
13927 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
13928 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
13929 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
13930 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
13931 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
13932 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
13933 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
13934 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
13935 " efer=%016VR{efer}\n"
13936 " pat=%016VR{pat}\n"
13937 " sf_mask=%016VR{sf_mask}\n"
13938 "krnl_gs_base=%016VR{krnl_gs_base}\n"
13939 " lstar=%016VR{lstar}\n"
13940 " star=%016VR{star} cstar=%016VR{cstar}\n"
13941 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
13942 );
13943
13944 char szInstr1[256];
13945 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
13946 DBGF_DISAS_FLAGS_DEFAULT_MODE,
13947 szInstr1, sizeof(szInstr1), NULL);
13948 char szInstr2[256];
13949 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
13950 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13951 szInstr2, sizeof(szInstr2), NULL);
13952
13953 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
13954}
13955
13956
13957/**
13958 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
13959 * dump to the assertion info.
13960 *
13961 * @param pEvtRec The record to dump.
13962 */
13963IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
13964{
13965 switch (pEvtRec->enmEvent)
13966 {
13967 case IEMVERIFYEVENT_IOPORT_READ:
13968 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
13969 pEvtRec->u.IOPortRead.Port,
13970 pEvtRec->u.IOPortRead.cbValue);
13971 break;
13972 case IEMVERIFYEVENT_IOPORT_WRITE:
13973 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
13974 pEvtRec->u.IOPortWrite.Port,
13975 pEvtRec->u.IOPortWrite.cbValue,
13976 pEvtRec->u.IOPortWrite.u32Value);
13977 break;
13978 case IEMVERIFYEVENT_IOPORT_STR_READ:
13979 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
13980 pEvtRec->u.IOPortStrRead.Port,
13981 pEvtRec->u.IOPortStrRead.cbValue,
13982 pEvtRec->u.IOPortStrRead.cTransfers);
13983 break;
13984 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13985 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
13986 pEvtRec->u.IOPortStrWrite.Port,
13987 pEvtRec->u.IOPortStrWrite.cbValue,
13988 pEvtRec->u.IOPortStrWrite.cTransfers);
13989 break;
13990 case IEMVERIFYEVENT_RAM_READ:
13991 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
13992 pEvtRec->u.RamRead.GCPhys,
13993 pEvtRec->u.RamRead.cb);
13994 break;
13995 case IEMVERIFYEVENT_RAM_WRITE:
13996 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
13997 pEvtRec->u.RamWrite.GCPhys,
13998 pEvtRec->u.RamWrite.cb,
13999 (int)pEvtRec->u.RamWrite.cb,
14000 pEvtRec->u.RamWrite.ab);
14001 break;
14002 default:
14003 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
14004 break;
14005 }
14006}
14007
14008
14009/**
14010 * Raises an assertion on the specified records, showing the given message with
14011 * the record dumps attached.
14012 *
14013 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14014 * @param pEvtRec1 The first record.
14015 * @param pEvtRec2 The second record.
14016 * @param pszMsg The message explaining why we're asserting.
14017 */
14018IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
14019{
14020 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14021 iemVerifyAssertAddRecordDump(pEvtRec1);
14022 iemVerifyAssertAddRecordDump(pEvtRec2);
14023 iemVerifyAssertMsg2(pVCpu);
14024 RTAssertPanic();
14025}
14026
14027
14028/**
14029 * Raises an assertion on the specified record, showing the given message with
14030 * a record dump attached.
14031 *
14032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14033 * @param pEvtRec The record to dump.
14034 * @param pszMsg The message explaining why we're asserting.
14035 */
14036IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
14037{
14038 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14039 iemVerifyAssertAddRecordDump(pEvtRec);
14040 iemVerifyAssertMsg2(pVCpu);
14041 RTAssertPanic();
14042}
14043
14044
14045/**
14046 * Verifies a write record.
14047 *
14048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14049 * @param pEvtRec The write record.
14050 * @param fRem Set if REM was doing the other execution. If clear,
14051 * it was HM.
14052 */
14053IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
14054{
14055 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
14056 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
14057 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
14058 if ( RT_FAILURE(rc)
14059 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
14060 {
14061 /* fend off ins */
14062 if ( !pVCpu->iem.s.cIOReads
14063 || pEvtRec->u.RamWrite.ab[0] != 0xcc
14064 || ( pEvtRec->u.RamWrite.cb != 1
14065 && pEvtRec->u.RamWrite.cb != 2
14066 && pEvtRec->u.RamWrite.cb != 4) )
14067 {
14068 /* fend off ROMs and MMIO */
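 /* These unsigned range checks skip verification of writes landing in the
    legacy VGA/BIOS area at 000A0000-000FFFFF and in the 256 KB just below
    4 GB, i.e. ROM/MMIO regions where differences are expected. */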
14069 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
14070 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
14071 {
14072 /* fend off fxsave */
14073 if (pEvtRec->u.RamWrite.cb != 512)
14074 {
14075 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
14076 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
14077 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
14078 RTAssertMsg2Add("%s: %.*Rhxs\n"
14079 "iem: %.*Rhxs\n",
14080 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
14081 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
14082 iemVerifyAssertAddRecordDump(pEvtRec);
14083 iemVerifyAssertMsg2(pVCpu);
14084 RTAssertPanic();
14085 }
14086 }
14087 }
14088 }
14089
14090}
14091
14092/**
14093 * Performs the post-execution verification checks.
14094 */
14095IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
14096{
14097 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14098 return rcStrictIem;
14099
14100 /*
14101 * Switch back the state.
14102 */
14103 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
14104 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
14105 Assert(pOrgCtx != pDebugCtx);
14106 IEM_GET_CTX(pVCpu) = pOrgCtx;
14107
14108 /*
14109 * Execute the instruction in REM.
14110 */
14111 bool fRem = false;
14112 PVM pVM = pVCpu->CTX_SUFF(pVM);
14114 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
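 /* Prefer single-stepping the instruction with HM when it is active and no
    I/O or problematic memory was involved; otherwise (or when HM bails out)
    fall back to the recompiler below. */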
14115#ifdef IEM_VERIFICATION_MODE_FULL_HM
14116 if ( HMIsEnabled(pVM)
14117 && pVCpu->iem.s.cIOReads == 0
14118 && pVCpu->iem.s.cIOWrites == 0
14119 && !pVCpu->iem.s.fProblematicMemory)
14120 {
14121 uint64_t uStartRip = pOrgCtx->rip;
14122 unsigned iLoops = 0;
14123 do
14124 {
14125 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
14126 iLoops++;
14127 } while ( rc == VINF_SUCCESS
14128 || ( rc == VINF_EM_DBG_STEPPED
14129 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14130 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
14131 || ( pOrgCtx->rip != pDebugCtx->rip
14132 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
14133 && iLoops < 8) );
14134 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
14135 rc = VINF_SUCCESS;
14136 }
14137#endif
14138 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
14139 || rc == VINF_IOM_R3_IOPORT_READ
14140 || rc == VINF_IOM_R3_IOPORT_WRITE
14141 || rc == VINF_IOM_R3_MMIO_READ
14142 || rc == VINF_IOM_R3_MMIO_READ_WRITE
14143 || rc == VINF_IOM_R3_MMIO_WRITE
14144 || rc == VINF_CPUM_R3_MSR_READ
14145 || rc == VINF_CPUM_R3_MSR_WRITE
14146 || rc == VINF_EM_RESCHEDULE
14147 )
14148 {
14149 EMRemLock(pVM);
14150 rc = REMR3EmulateInstruction(pVM, pVCpu);
14151 AssertRC(rc);
14152 EMRemUnlock(pVM);
14153 fRem = true;
14154 }
14155
14156# if 1 /* Skip unimplemented instructions for now. */
14157 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14158 {
14159 IEM_GET_CTX(pVCpu) = pOrgCtx;
14160 if (rc == VINF_EM_DBG_STEPPED)
14161 return VINF_SUCCESS;
14162 return rc;
14163 }
14164# endif
14165
14166 /*
14167 * Compare the register states.
14168 */
14169 unsigned cDiffs = 0;
14170 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
14171 {
14172 //Log(("REM and IEM ends up with different registers!\n"));
14173 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
14174
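/* Helper macros for comparing the context IEM executed on (pDebugCtx) against
   the one the other engine produced (pOrgCtx), bumping cDiffs on mismatch. */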
14175# define CHECK_FIELD(a_Field) \
14176 do \
14177 { \
14178 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14179 { \
14180 switch (sizeof(pOrgCtx->a_Field)) \
14181 { \
14182 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14183 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14184 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14185 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
14186 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14187 } \
14188 cDiffs++; \
14189 } \
14190 } while (0)
14191# define CHECK_XSTATE_FIELD(a_Field) \
14192 do \
14193 { \
14194 if (pOrgXState->a_Field != pDebugXState->a_Field) \
14195 { \
14196 switch (sizeof(pOrgXState->a_Field)) \
14197 { \
14198 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14199 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14200 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14201 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
14202 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
14203 } \
14204 cDiffs++; \
14205 } \
14206 } while (0)
14207
14208# define CHECK_BIT_FIELD(a_Field) \
14209 do \
14210 { \
14211 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
14212 { \
14213 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
14214 cDiffs++; \
14215 } \
14216 } while (0)
14217
14218# define CHECK_SEL(a_Sel) \
14219 do \
14220 { \
14221 CHECK_FIELD(a_Sel.Sel); \
14222 CHECK_FIELD(a_Sel.Attr.u); \
14223 CHECK_FIELD(a_Sel.u64Base); \
14224 CHECK_FIELD(a_Sel.u32Limit); \
14225 CHECK_FIELD(a_Sel.fFlags); \
14226 } while (0)
14227
14228 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
14229 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
14230
14231#if 1 /* The recompiler doesn't update these the intel way. */
14232 if (fRem)
14233 {
14234 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
14235 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
14236 pOrgXState->x87.CS = pDebugXState->x87.CS;
14237 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
14238 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
14239 pOrgXState->x87.DS = pDebugXState->x87.DS;
14240 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
14241 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
14242 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
14243 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
14244 }
14245#endif
14246 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
14247 {
14248 RTAssertMsg2Weak(" the FPU state differs\n");
14249 cDiffs++;
14250 CHECK_XSTATE_FIELD(x87.FCW);
14251 CHECK_XSTATE_FIELD(x87.FSW);
14252 CHECK_XSTATE_FIELD(x87.FTW);
14253 CHECK_XSTATE_FIELD(x87.FOP);
14254 CHECK_XSTATE_FIELD(x87.FPUIP);
14255 CHECK_XSTATE_FIELD(x87.CS);
14256 CHECK_XSTATE_FIELD(x87.Rsrvd1);
14257 CHECK_XSTATE_FIELD(x87.FPUDP);
14258 CHECK_XSTATE_FIELD(x87.DS);
14259 CHECK_XSTATE_FIELD(x87.Rsrvd2);
14260 CHECK_XSTATE_FIELD(x87.MXCSR);
14261 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
14262 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
14263 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
14264 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
14265 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
14266 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
14267 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
14268 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
14269 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
14270 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
14271 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
14272 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
14273 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
14274 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
14275 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
14276 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
14277 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
14278 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
14279 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
14280 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
14281 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
14282 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
14283 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
14284 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
14285 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
14286 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
14287 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
14288 }
14289 CHECK_FIELD(rip);
14290 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
14291 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
14292 {
14293 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
14294 CHECK_BIT_FIELD(rflags.Bits.u1CF);
14295 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
14296 CHECK_BIT_FIELD(rflags.Bits.u1PF);
14297 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
14298 CHECK_BIT_FIELD(rflags.Bits.u1AF);
14299 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
14300 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
14301 CHECK_BIT_FIELD(rflags.Bits.u1SF);
14302 CHECK_BIT_FIELD(rflags.Bits.u1TF);
14303 CHECK_BIT_FIELD(rflags.Bits.u1IF);
14304 CHECK_BIT_FIELD(rflags.Bits.u1DF);
14305 CHECK_BIT_FIELD(rflags.Bits.u1OF);
14306 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
14307 CHECK_BIT_FIELD(rflags.Bits.u1NT);
14308 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
14309 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
14310 CHECK_BIT_FIELD(rflags.Bits.u1RF);
14311 CHECK_BIT_FIELD(rflags.Bits.u1VM);
14312 CHECK_BIT_FIELD(rflags.Bits.u1AC);
14313 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
14314 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
14315 CHECK_BIT_FIELD(rflags.Bits.u1ID);
14316 }
14317
14318 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
14319 CHECK_FIELD(rax);
14320 CHECK_FIELD(rcx);
14321 if (!pVCpu->iem.s.fIgnoreRaxRdx)
14322 CHECK_FIELD(rdx);
14323 CHECK_FIELD(rbx);
14324 CHECK_FIELD(rsp);
14325 CHECK_FIELD(rbp);
14326 CHECK_FIELD(rsi);
14327 CHECK_FIELD(rdi);
14328 CHECK_FIELD(r8);
14329 CHECK_FIELD(r9);
14330 CHECK_FIELD(r10);
14331 CHECK_FIELD(r11);
14332 CHECK_FIELD(r12);
14333 CHECK_FIELD(r13);
14334 CHECK_SEL(cs);
14335 CHECK_SEL(ss);
14336 CHECK_SEL(ds);
14337 CHECK_SEL(es);
14338 CHECK_SEL(fs);
14339 CHECK_SEL(gs);
14340 CHECK_FIELD(cr0);
14341
14342 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14343 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14344 /* Kludge #2: CR2 differs slightly on cross page boundary faults, we report the last address of the access
14345 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14346 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14347 {
14348 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14349 { /* ignore */ }
14350 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14351 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14352 && fRem)
14353 { /* ignore */ }
14354 else
14355 CHECK_FIELD(cr2);
14356 }
14357 CHECK_FIELD(cr3);
14358 CHECK_FIELD(cr4);
14359 CHECK_FIELD(dr[0]);
14360 CHECK_FIELD(dr[1]);
14361 CHECK_FIELD(dr[2]);
14362 CHECK_FIELD(dr[3]);
14363 CHECK_FIELD(dr[6]);
14364 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14365 CHECK_FIELD(dr[7]);
14366 CHECK_FIELD(gdtr.cbGdt);
14367 CHECK_FIELD(gdtr.pGdt);
14368 CHECK_FIELD(idtr.cbIdt);
14369 CHECK_FIELD(idtr.pIdt);
14370 CHECK_SEL(ldtr);
14371 CHECK_SEL(tr);
14372 CHECK_FIELD(SysEnter.cs);
14373 CHECK_FIELD(SysEnter.eip);
14374 CHECK_FIELD(SysEnter.esp);
14375 CHECK_FIELD(msrEFER);
14376 CHECK_FIELD(msrSTAR);
14377 CHECK_FIELD(msrPAT);
14378 CHECK_FIELD(msrLSTAR);
14379 CHECK_FIELD(msrCSTAR);
14380 CHECK_FIELD(msrSFMASK);
14381 CHECK_FIELD(msrKERNELGSBASE);
14382
14383 if (cDiffs != 0)
14384 {
14385 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14386 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14387 RTAssertPanic();
14388 static bool volatile s_fEnterDebugger = true;
14389 if (s_fEnterDebugger)
14390 DBGFSTOP(pVM);
14391
14392# if 1 /* Ignore unimplemented instructions for now. */
14393 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14394 rcStrictIem = VINF_SUCCESS;
14395# endif
14396 }
14397# undef CHECK_FIELD
14398# undef CHECK_BIT_FIELD
14399 }
14400
14401 /*
14402 * If the register state compared fine, check the verification event
14403 * records.
14404 */
14405 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14406 {
14407 /*
14408 * Compare verification event records.
14409 * - I/O port accesses should be a 1:1 match.
14410 */
14411 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14412 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14413 while (pIemRec && pOtherRec)
14414 {
14415 /* Since the other engine might miss RAM writes and reads, skip the extra
14416 IEM RAM records here, verifying any extra writes against guest memory. */
14417 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14418 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14419 && pIemRec->pNext)
14420 {
14421 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14422 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14423 pIemRec = pIemRec->pNext;
14424 }
14425
14426 /* Do the compare. */
14427 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14428 {
14429 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14430 break;
14431 }
14432 bool fEquals;
14433 switch (pIemRec->enmEvent)
14434 {
14435 case IEMVERIFYEVENT_IOPORT_READ:
14436 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14437 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14438 break;
14439 case IEMVERIFYEVENT_IOPORT_WRITE:
14440 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14441 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14442 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14443 break;
14444 case IEMVERIFYEVENT_IOPORT_STR_READ:
14445 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14446 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14447 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14448 break;
14449 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14450 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14451 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14452 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14453 break;
14454 case IEMVERIFYEVENT_RAM_READ:
14455 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14456 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14457 break;
14458 case IEMVERIFYEVENT_RAM_WRITE:
14459 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14460 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14461 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14462 break;
14463 default:
14464 fEquals = false;
14465 break;
14466 }
14467 if (!fEquals)
14468 {
14469 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14470 break;
14471 }
14472
14473 /* advance */
14474 pIemRec = pIemRec->pNext;
14475 pOtherRec = pOtherRec->pNext;
14476 }
14477
14478 /* Ignore extra writes and reads. */
14479 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14480 {
14481 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14482 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14483 pIemRec = pIemRec->pNext;
14484 }
14485 if (pIemRec != NULL)
14486 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14487 else if (pOtherRec != NULL)
14488 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14489 }
14490 IEM_GET_CTX(pVCpu) = pOrgCtx;
14491
14492 return rcStrictIem;
14493}
14494
14495#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14496
14497/* stubs */
14498IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14499{
14500 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14501 return VERR_INTERNAL_ERROR;
14502}
14503
14504IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14505{
14506 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14507 return VERR_INTERNAL_ERROR;
14508}
14509
14510#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14511
14512
14513#ifdef LOG_ENABLED
14514/**
14515 * Logs the current instruction.
14516 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14517 * @param pCtx The current CPU context.
14518 * @param fSameCtx Set if we have the same context information as the VMM,
14519 * clear if we may have already executed an instruction in
14520 * our debug context. When clear, we assume IEMCPU holds
14521 * valid CPU mode info.
14522 */
14523IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14524{
14525# ifdef IN_RING3
14526 if (LogIs2Enabled())
14527 {
14528 char szInstr[256];
14529 uint32_t cbInstr = 0;
14530 if (fSameCtx)
14531 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14532 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14533 szInstr, sizeof(szInstr), &cbInstr);
14534 else
14535 {
14536 uint32_t fFlags = 0;
14537 switch (pVCpu->iem.s.enmCpuMode)
14538 {
14539 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14540 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14541 case IEMMODE_16BIT:
14542 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14543 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14544 else
14545 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14546 break;
14547 }
14548 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14549 szInstr, sizeof(szInstr), &cbInstr);
14550 }
14551
14552 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14553 Log2(("****\n"
14554 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14555 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14556 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14557 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14558 " %s\n"
14559 ,
14560 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14561 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14562 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14563 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14564 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14565 szInstr));
14566
14567 if (LogIs3Enabled())
14568 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14569 }
14570 else
14571# endif
14572 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14573 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14574 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14575}
14576#endif
14577
14578
14579/**
14580 * Makes status code adjustments (pass up from I/O and access handlers)
14581 * as well as maintaining statistics.
14582 *
14583 * @returns Strict VBox status code to pass up.
14584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14585 * @param rcStrict The status from executing an instruction.
14586 */
14587DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14588{
14589 if (rcStrict != VINF_SUCCESS)
14590 {
14591 if (RT_SUCCESS(rcStrict))
14592 {
14593 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14594 || rcStrict == VINF_IOM_R3_IOPORT_READ
14595 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14596 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14597 || rcStrict == VINF_IOM_R3_MMIO_READ
14598 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14599 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14600 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14601 || rcStrict == VINF_CPUM_R3_MSR_READ
14602 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14603 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14604 || rcStrict == VINF_EM_RAW_TO_R3
14605 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14606 /* raw-mode / virt handlers only: */
14607 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14608 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14609 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14610 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14611 || rcStrict == VINF_SELM_SYNC_GDT
14612 || rcStrict == VINF_CSAM_PENDING_ACTION
14613 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14614 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14615/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
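 /* A saved pass-up status takes precedence when it lies outside the VINF_EM
    range, or when it is a lower (more important) value within that range. */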
14616 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14617 if (rcPassUp == VINF_SUCCESS)
14618 pVCpu->iem.s.cRetInfStatuses++;
14619 else if ( rcPassUp < VINF_EM_FIRST
14620 || rcPassUp > VINF_EM_LAST
14621 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14622 {
14623 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14624 pVCpu->iem.s.cRetPassUpStatus++;
14625 rcStrict = rcPassUp;
14626 }
14627 else
14628 {
14629 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14630 pVCpu->iem.s.cRetInfStatuses++;
14631 }
14632 }
14633 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14634 pVCpu->iem.s.cRetAspectNotImplemented++;
14635 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14636 pVCpu->iem.s.cRetInstrNotImplemented++;
14637#ifdef IEM_VERIFICATION_MODE_FULL
14638 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14639 rcStrict = VINF_SUCCESS;
14640#endif
14641 else
14642 pVCpu->iem.s.cRetErrStatuses++;
14643 }
14644 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14645 {
14646 pVCpu->iem.s.cRetPassUpStatus++;
14647 rcStrict = pVCpu->iem.s.rcPassUp;
14648 }
14649
14650 return rcStrict;
14651}
14652
14653
14654/**
14655 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14656 * IEMExecOneWithPrefetchedByPC.
14657 *
14658 * Similar code is found in IEMExecLots.
14659 *
14660 * @return Strict VBox status code.
14661 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14663 * @param fExecuteInhibit If set, execute the instruction following CLI,
14664 * POP SS and MOV SS,GR.
14665 */
14666DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14667{
14668#ifdef IEM_WITH_SETJMP
14669 VBOXSTRICTRC rcStrict;
14670 jmp_buf JmpBuf;
14671 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14672 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
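 /* The opcode fetchers and memory helpers longjmp back here with a strict
    status code when they need to raise a guest exception or otherwise bail. */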
14673 if ((rcStrict = setjmp(JmpBuf)) == 0)
14674 {
14675 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14676 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14677 }
14678 else
14679 pVCpu->iem.s.cLongJumps++;
14680 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14681#else
14682 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14683 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14684#endif
14685 if (rcStrict == VINF_SUCCESS)
14686 pVCpu->iem.s.cInstructions++;
14687 if (pVCpu->iem.s.cActiveMappings > 0)
14688 {
14689 Assert(rcStrict != VINF_SUCCESS);
14690 iemMemRollback(pVCpu);
14691 }
14692//#ifdef DEBUG
14693// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14694//#endif
14695
14696 /* Execute the next instruction as well if a cli, pop ss or
14697 mov ss, Gr has just completed successfully. */
14698 if ( fExecuteInhibit
14699 && rcStrict == VINF_SUCCESS
14700 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14701 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14702 {
14703 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14704 if (rcStrict == VINF_SUCCESS)
14705 {
14706#ifdef LOG_ENABLED
14707 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14708#endif
14709#ifdef IEM_WITH_SETJMP
14710 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14711 if ((rcStrict = setjmp(JmpBuf)) == 0)
14712 {
14713 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14714 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14715 }
14716 else
14717 pVCpu->iem.s.cLongJumps++;
14718 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14719#else
14720 IEM_OPCODE_GET_NEXT_U8(&b);
14721 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14722#endif
14723 if (rcStrict == VINF_SUCCESS)
14724 pVCpu->iem.s.cInstructions++;
14725 if (pVCpu->iem.s.cActiveMappings > 0)
14726 {
14727 Assert(rcStrict != VINF_SUCCESS);
14728 iemMemRollback(pVCpu);
14729 }
14730 }
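 /* Clear the interrupt inhibition by setting the inhibit PC to an address that can never match RIP. */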
14731 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14732 }
14733
14734 /*
14735 * Return value fiddling, statistics and sanity assertions.
14736 */
14737 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14738
14739 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14740 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14741#if defined(IEM_VERIFICATION_MODE_FULL)
14742 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14743 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14744 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14745 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14746#endif
14747 return rcStrict;
14748}
14749
14750
14751#ifdef IN_RC
14752/**
14753 * Re-enters raw-mode or ensure we return to ring-3.
14754 *
14755 * @returns rcStrict, maybe modified.
14756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14757 * @param pCtx The current CPU context.
14758 * @param rcStrict The status code returned by the interpreter.
14759 */
14760DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
14761{
14762 if ( !pVCpu->iem.s.fInPatchCode
14763 && ( rcStrict == VINF_SUCCESS
14764 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14765 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14766 {
14767 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14768 CPUMRawEnter(pVCpu);
14769 else
14770 {
14771 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14772 rcStrict = VINF_EM_RESCHEDULE;
14773 }
14774 }
14775 return rcStrict;
14776}
14777#endif
14778
14779
14780/**
14781 * Execute one instruction.
14782 *
14783 * @return Strict VBox status code.
14784 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14785 */
14786VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14787{
14788#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14789 if (++pVCpu->iem.s.cVerifyDepth == 1)
14790 iemExecVerificationModeSetup(pVCpu);
14791#endif
14792#ifdef LOG_ENABLED
14793 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14794 iemLogCurInstr(pVCpu, pCtx, true);
14795#endif
14796
14797 /*
14798 * Do the decoding and emulation.
14799 */
14800 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14801 if (rcStrict == VINF_SUCCESS)
14802 rcStrict = iemExecOneInner(pVCpu, true);
14803
14804#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14805 /*
14806 * Assert some sanity.
14807 */
14808 if (pVCpu->iem.s.cVerifyDepth == 1)
14809 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14810 pVCpu->iem.s.cVerifyDepth--;
14811#endif
14812#ifdef IN_RC
14813 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14814#endif
14815 if (rcStrict != VINF_SUCCESS)
14816 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14817 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14818 return rcStrict;
14819}
14820
14821
14822VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14823{
14824 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14825 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14826
14827 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14828 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14829 if (rcStrict == VINF_SUCCESS)
14830 {
14831 rcStrict = iemExecOneInner(pVCpu, true);
14832 if (pcbWritten)
14833 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14834 }
14835
14836#ifdef IN_RC
14837 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14838#endif
14839 return rcStrict;
14840}
14841
14842
14843VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14844 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14845{
14846 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14847 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14848
14849 VBOXSTRICTRC rcStrict;
14850 if ( cbOpcodeBytes
14851 && pCtx->rip == OpcodeBytesPC)
14852 {
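 /* The caller's opcode bytes match the current RIP, so feed them straight
    into the decoder instead of re-fetching them from guest memory. */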
14853 iemInitDecoder(pVCpu, false);
14854#ifdef IEM_WITH_CODE_TLB
14855 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14856 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14857 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14858 pVCpu->iem.s.offCurInstrStart = 0;
14859 pVCpu->iem.s.offInstrNextByte = 0;
14860#else
14861 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14862 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14863#endif
14864 rcStrict = VINF_SUCCESS;
14865 }
14866 else
14867 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14868 if (rcStrict == VINF_SUCCESS)
14869 {
14870 rcStrict = iemExecOneInner(pVCpu, true);
14871 }
14872
14873#ifdef IN_RC
14874 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14875#endif
14876 return rcStrict;
14877}
14878
14879
14880VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14881{
14882 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14883 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14884
14885 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14886 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14887 if (rcStrict == VINF_SUCCESS)
14888 {
14889 rcStrict = iemExecOneInner(pVCpu, false);
14890 if (pcbWritten)
14891 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14892 }
14893
14894#ifdef IN_RC
14895 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14896#endif
14897 return rcStrict;
14898}
14899
14900
14901VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14902 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14903{
14904 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14905 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14906
14907 VBOXSTRICTRC rcStrict;
14908 if ( cbOpcodeBytes
14909 && pCtx->rip == OpcodeBytesPC)
14910 {
14911 iemInitDecoder(pVCpu, true);
14912#ifdef IEM_WITH_CODE_TLB
14913 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14914 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14915 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14916 pVCpu->iem.s.offCurInstrStart = 0;
14917 pVCpu->iem.s.offInstrNextByte = 0;
14918#else
14919 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14920 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14921#endif
14922 rcStrict = VINF_SUCCESS;
14923 }
14924 else
14925 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14926 if (rcStrict == VINF_SUCCESS)
14927 rcStrict = iemExecOneInner(pVCpu, false);
14928
14929#ifdef IN_RC
14930 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14931#endif
14932 return rcStrict;
14933}
14934
14935
14936/**
14937 * For debugging DISGetParamSize, may come in handy.
14938 *
14939 * @returns Strict VBox status code.
14940 * @param pVCpu The cross context virtual CPU structure of the
14941 * calling EMT.
14942 * @param pCtxCore The context core structure.
14943 * @param OpcodeBytesPC The PC of the opcode bytes.
14944 * @param pvOpcodeBytes Prefetched opcode bytes.
14945 * @param cbOpcodeBytes Number of prefetched bytes.
14946 * @param pcbWritten Where to return the number of bytes written.
14947 * Optional.
14948 */
14949VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14950 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14951 uint32_t *pcbWritten)
14952{
14953 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14954 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14955
14956 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14957 VBOXSTRICTRC rcStrict;
14958 if ( cbOpcodeBytes
14959 && pCtx->rip == OpcodeBytesPC)
14960 {
14961 iemInitDecoder(pVCpu, true);
14962#ifdef IEM_WITH_CODE_TLB
14963 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14964 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14965 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14966 pVCpu->iem.s.offCurInstrStart = 0;
14967 pVCpu->iem.s.offInstrNextByte = 0;
14968#else
14969 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14970 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14971#endif
14972 rcStrict = VINF_SUCCESS;
14973 }
14974 else
14975 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14976 if (rcStrict == VINF_SUCCESS)
14977 {
14978 rcStrict = iemExecOneInner(pVCpu, false);
14979 if (pcbWritten)
14980 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14981 }
14982
14983#ifdef IN_RC
14984 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14985#endif
14986 return rcStrict;
14987}
14988
14989
14990VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14991{
14992 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14993
14994#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14995 /*
14996 * See if there is an interrupt pending in TRPM, inject it if we can.
14997 */
14998 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14999# ifdef IEM_VERIFICATION_MODE_FULL
15000 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15001# endif
15002 if ( pCtx->eflags.Bits.u1IF
15003 && TRPMHasTrap(pVCpu)
15004 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15005 {
15006 uint8_t u8TrapNo;
15007 TRPMEVENT enmType;
15008 RTGCUINT uErrCode;
15009 RTGCPTR uCr2;
15010 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15011 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15012 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15013 TRPMResetTrap(pVCpu);
15014 }
15015
15016 /*
15017 * Log the state.
15018 */
15019# ifdef LOG_ENABLED
15020 iemLogCurInstr(pVCpu, pCtx, true);
15021# endif
15022
15023 /*
15024 * Do the decoding and emulation.
15025 */
15026 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15027 if (rcStrict == VINF_SUCCESS)
15028 rcStrict = iemExecOneInner(pVCpu, true);
15029
15030 /*
15031 * Assert some sanity.
15032 */
15033 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
15034
15035 /*
15036 * Log and return.
15037 */
15038 if (rcStrict != VINF_SUCCESS)
15039 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15040 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15041 if (pcInstructions)
15042 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15043 return rcStrict;
15044
15045#else /* Not verification mode */
15046
15047 /*
15048 * See if there is an interrupt pending in TRPM, inject it if we can.
15049 */
15050 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15051# ifdef IEM_VERIFICATION_MODE_FULL
15052 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
15053# endif
15054 if ( pCtx->eflags.Bits.u1IF
15055 && TRPMHasTrap(pVCpu)
15056 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
15057 {
15058 uint8_t u8TrapNo;
15059 TRPMEVENT enmType;
15060 RTGCUINT uErrCode;
15061 RTGCPTR uCr2;
15062 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
15063 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
15064 if (!IEM_VERIFICATION_ENABLED(pVCpu))
15065 TRPMResetTrap(pVCpu);
15066 }
15067
15068 /*
15069 * Initial decoder init w/ prefetch, then setup setjmp.
15070 */
15071 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
15072 if (rcStrict == VINF_SUCCESS)
15073 {
15074# ifdef IEM_WITH_SETJMP
15075 jmp_buf JmpBuf;
15076 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
15077 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
15078 pVCpu->iem.s.cActiveMappings = 0;
15079 if ((rcStrict = setjmp(JmpBuf)) == 0)
15080# endif
15081 {
15082 /*
15083 * The run loop. We limit ourselves to 4096 instructions right now.
15084 */
15085 PVM pVM = pVCpu->CTX_SUFF(pVM);
15086 uint32_t cInstr = 4096;
15087 for (;;)
15088 {
15089 /*
15090 * Log the state.
15091 */
15092# ifdef LOG_ENABLED
15093 iemLogCurInstr(pVCpu, pCtx, true);
15094# endif
15095
15096 /*
15097 * Do the decoding and emulation.
15098 */
15099 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
15100 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
15101 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
15102 {
15103 Assert(pVCpu->iem.s.cActiveMappings == 0);
15104 pVCpu->iem.s.cInstructions++;
15105 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
15106 {
15107 uint32_t fCpu = pVCpu->fLocalForcedActions
15108 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
15109 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
15110 | VMCPU_FF_TLB_FLUSH
15111# ifdef VBOX_WITH_RAW_MODE
15112 | VMCPU_FF_TRPM_SYNC_IDT
15113 | VMCPU_FF_SELM_SYNC_TSS
15114 | VMCPU_FF_SELM_SYNC_GDT
15115 | VMCPU_FF_SELM_SYNC_LDT
15116# endif
15117 | VMCPU_FF_INHIBIT_INTERRUPTS
15118 | VMCPU_FF_BLOCK_NMIS
15119 | VMCPU_FF_UNHALT ));
15120
15121 if (RT_LIKELY( ( !fCpu
15122 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
15123 && !pCtx->rflags.Bits.u1IF) )
15124 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
15125 {
15126 if (cInstr-- > 0)
15127 {
15128 Assert(pVCpu->iem.s.cActiveMappings == 0);
15129 iemReInitDecoder(pVCpu);
15130 continue;
15131 }
15132 }
15133 }
15134 Assert(pVCpu->iem.s.cActiveMappings == 0);
15135 }
15136 else if (pVCpu->iem.s.cActiveMappings > 0)
15137 iemMemRollback(pVCpu);
15138 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
15139 break;
15140 }
15141 }
15142# ifdef IEM_WITH_SETJMP
15143 else
15144 {
15145 if (pVCpu->iem.s.cActiveMappings > 0)
15146 iemMemRollback(pVCpu);
15147 pVCpu->iem.s.cLongJumps++;
15148 }
15149 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
15150# endif
15151
15152 /*
15153 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
15154 */
15155 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
15156 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
15157# if defined(IEM_VERIFICATION_MODE_FULL)
15158 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
15159 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
15160 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
15161 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
15162# endif
15163 }
15164
15165 /*
15166 * Maybe re-enter raw-mode and log.
15167 */
15168# ifdef IN_RC
15169 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
15170# endif
15171 if (rcStrict != VINF_SUCCESS)
15172 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15173 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15174 if (pcInstructions)
15175 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
15176 return rcStrict;
15177#endif /* Not verification mode */
15178}
15179
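/*
 * Illustrative sketch (hypothetical caller, not part of this file): a minimal
 * execution-manager style loop around IEMExecLots.  It keeps emulating batches
 * of instructions until IEM returns something other than plain success; the
 * function name and the overall instruction cap are made up for illustration.
 */
#if 0
static VBOXSTRICTRC emR3SketchRunIem(PVMCPU pVCpu)
{
    uint64_t     cTotalInstr = 0;
    VBOXSTRICTRC rcStrict;
    do
    {
        uint32_t cInstr = 0;
        rcStrict     = IEMExecLots(pVCpu, &cInstr);
        cTotalInstr += cInstr;
    } while (   rcStrict == VINF_SUCCESS
             && cTotalInstr < _1M /* arbitrary safety cap for the sketch */);
    Log(("emR3SketchRunIem: %RU64 instructions, rcStrict=%Rrc\n", cTotalInstr, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif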
15180
15181
15182/**
15183 * Injects a trap, fault, abort, software interrupt or external interrupt.
15184 *
15185 * The parameter list matches TRPMQueryTrapAll pretty closely.
15186 *
15187 * @returns Strict VBox status code.
15188 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15189 * @param u8TrapNo The trap number.
15190 * @param enmType What type is it (trap/fault/abort), software
15191 * interrupt or hardware interrupt.
15192 * @param uErrCode The error code if applicable.
15193 * @param uCr2 The CR2 value if applicable.
15194 * @param cbInstr The instruction length (only relevant for
15195 * software interrupts).
15196 */
15197VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
15198 uint8_t cbInstr)
15199{
15200 iemInitDecoder(pVCpu, false);
15201#ifdef DBGFTRACE_ENABLED
15202 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
15203 u8TrapNo, enmType, uErrCode, uCr2);
15204#endif
15205
15206 uint32_t fFlags;
15207 switch (enmType)
15208 {
15209 case TRPM_HARDWARE_INT:
15210 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
15211 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
15212 uErrCode = uCr2 = 0;
15213 break;
15214
15215 case TRPM_SOFTWARE_INT:
15216 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
15217 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
15218 uErrCode = uCr2 = 0;
15219 break;
15220
15221 case TRPM_TRAP:
15222 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
15223 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
15224 if (u8TrapNo == X86_XCPT_PF)
15225 fFlags |= IEM_XCPT_FLAGS_CR2;
15226 switch (u8TrapNo)
15227 {
15228 case X86_XCPT_DF:
15229 case X86_XCPT_TS:
15230 case X86_XCPT_NP:
15231 case X86_XCPT_SS:
15232 case X86_XCPT_PF:
15233 case X86_XCPT_AC:
15234 fFlags |= IEM_XCPT_FLAGS_ERR;
15235 break;
15236
15237 case X86_XCPT_NMI:
15238 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
15239 break;
15240 }
15241 break;
15242
15243 IEM_NOT_REACHED_DEFAULT_CASE_RET();
15244 }
15245
15246 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
15247}
15248
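/*
 * Illustrative sketch (hypothetical wrapper, not part of this file): injecting
 * a guest page fault directly via IEMInjectTrap.  For X86_XCPT_PF the switch
 * above picks up both the error code and the CR2 value; the wrapper name and
 * its parameters are made up for illustration.
 */
#if 0
static VBOXSTRICTRC iemSketchInjectPageFault(PVMCPU pVCpu, uint32_t uErrCode, RTGCPTR GCPtrFault)
{
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, (uint16_t)uErrCode, GCPtrFault, 0 /* cbInstr */);
}
#endif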
15249
15250/**
15251 * Injects the active TRPM event.
15252 *
15253 * @returns Strict VBox status code.
15254 * @param pVCpu The cross context virtual CPU structure.
15255 */
15256VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
15257{
15258#ifndef IEM_IMPLEMENTS_TASKSWITCH
15259 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
15260#else
15261 uint8_t u8TrapNo;
15262 TRPMEVENT enmType;
15263 RTGCUINT uErrCode;
15264 RTGCUINTPTR uCr2;
15265 uint8_t cbInstr;
15266 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
15267 if (RT_FAILURE(rc))
15268 return rc;
15269
15270 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
15271
15272 /** @todo Are there any other codes that imply the event was successfully
15273 * delivered to the guest? See @bugref{6607}. */
15274 if ( rcStrict == VINF_SUCCESS
15275 || rcStrict == VINF_IEM_RAISED_XCPT)
15276 {
15277 TRPMResetTrap(pVCpu);
15278 }
15279 return rcStrict;
15280#endif
15281}
15282
15283
15284VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
15285{
15286 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15287 return VERR_NOT_IMPLEMENTED;
15288}
15289
15290
15291VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
15292{
15293 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
15294 return VERR_NOT_IMPLEMENTED;
15295}
15296
15297
15298#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
15299/**
15300 * Executes an IRET instruction with the default operand size.
15301 *
15302 * This is for PATM.
15303 *
15304 * @returns VBox status code.
15305 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15306 * @param pCtxCore The register frame.
15307 */
15308VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
15309{
15310 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
15311
15312 iemCtxCoreToCtx(pCtx, pCtxCore);
15313 iemInitDecoder(pVCpu);
15314 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
15315 if (rcStrict == VINF_SUCCESS)
15316 iemCtxToCtxCore(pCtxCore, pCtx);
15317 else
15318 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
15319 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
15320 return rcStrict;
15321}
15322#endif
15323
15324
15325/**
15326 * Macro used by the IEMExec* methods to check the given instruction length.
15327 *
15328 * Will return on failure!
15329 *
15330 * @param a_cbInstr The given instruction length.
15331 * @param a_cbMin The minimum length.
15332 */
15333#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15334 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15335 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15336
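/*
 * Illustrative sketch (hypothetical helper, not part of this file): the macro
 * above folds the range check a_cbMin <= a_cbInstr <= 15 into one unsigned
 * comparison.  The helper spells the same check out the long way; the name is
 * made up and the equivalence assumes cbMin <= 15, as in all uses below.
 */
#if 0
static bool iemSketchIsValidInstrLen(unsigned cbInstr, unsigned cbMin)
{
    /* When cbInstr < cbMin the subtraction wraps to a huge value, so a single
       comparison covers both the lower and the upper bound. */
    bool const fOneCmp = cbInstr - cbMin <= (unsigned)15 - cbMin;
    bool const fPlain  = cbInstr >= cbMin && cbInstr <= 15;
    Assert(fOneCmp == fPlain);
    return fPlain;
}
#endif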
15337
15338/**
15339 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15340 *
15341 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15342 *
15343 * @returns Fiddled strict VBox status code, ready to return to the non-IEM caller.
15344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15345 * @param rcStrict The status code to fiddle.
15346 */
15347DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15348{
15349 iemUninitExec(pVCpu);
15350#ifdef IN_RC
15351 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15352 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15353#else
15354 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15355#endif
15356}
15357
15358
15359/**
15360 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15361 *
15362 * This API ASSUMES that the caller has already verified that the guest code is
15363 * allowed to access the I/O port. (The I/O port is in the DX register in the
15364 * guest state.)
15365 *
15366 * @returns Strict VBox status code.
15367 * @param pVCpu The cross context virtual CPU structure.
15368 * @param cbValue The size of the I/O port access (1, 2, or 4).
15369 * @param enmAddrMode The addressing mode.
15370 * @param fRepPrefix Indicates whether a repeat prefix is used
15371 * (doesn't matter which for this instruction).
15372 * @param cbInstr The instruction length in bytes.
15373 * @param iEffSeg The effective segment address.
15374 * @param fIoChecked Whether the access to the I/O port has been
15375 * checked or not. It's typically checked in the
15376 * HM scenario.
15377 */
15378VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15379 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15380{
15381 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15382 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15383
15384 /*
15385 * State init.
15386 */
15387 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15388
15389 /*
15390 * Switch orgy for getting to the right handler.
15391 */
15392 VBOXSTRICTRC rcStrict;
15393 if (fRepPrefix)
15394 {
15395 switch (enmAddrMode)
15396 {
15397 case IEMMODE_16BIT:
15398 switch (cbValue)
15399 {
15400 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15401 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15402 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15403 default:
15404 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15405 }
15406 break;
15407
15408 case IEMMODE_32BIT:
15409 switch (cbValue)
15410 {
15411 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15412 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15413 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15414 default:
15415 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15416 }
15417 break;
15418
15419 case IEMMODE_64BIT:
15420 switch (cbValue)
15421 {
15422 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15423 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15424 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15425 default:
15426 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15427 }
15428 break;
15429
15430 default:
15431 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15432 }
15433 }
15434 else
15435 {
15436 switch (enmAddrMode)
15437 {
15438 case IEMMODE_16BIT:
15439 switch (cbValue)
15440 {
15441 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15442 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15443 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15444 default:
15445 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15446 }
15447 break;
15448
15449 case IEMMODE_32BIT:
15450 switch (cbValue)
15451 {
15452 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15453 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15454 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15455 default:
15456 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15457 }
15458 break;
15459
15460 case IEMMODE_64BIT:
15461 switch (cbValue)
15462 {
15463 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15464 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15465 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15466 default:
15467 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15468 }
15469 break;
15470
15471 default:
15472 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15473 }
15474 }
15475
15476 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15477}
15478
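/*
 * Illustrative sketch (hypothetical wrapper, not part of this file): how an HM
 * exit handler might hand a decoded "rep outsb" to IEM after verifying that
 * the guest may access the port in DX.  The wrapper name is made up; the
 * argument meanings follow the doxygen comment above.
 */
#if 0
static VBOXSTRICTRC hmSketchEmulateRepOutsb(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iEffSeg)
{
    return IEMExecStringIoWrite(pVCpu,
                                1 /* cbValue: byte sized access */,
                                IEMMODE_64BIT /* enmAddrMode */,
                                true /* fRepPrefix */,
                                cbInstr,
                                iEffSeg,
                                true /* fIoChecked: the caller already validated the port access */);
}
#endif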
15479
15480/**
15481 * Interface for HM and EM for executing string I/O IN (read) instructions.
15482 *
15483 * This API ASSUMES that the caller has already verified that the guest code is
15484 * allowed to access the I/O port. (The I/O port is in the DX register in the
15485 * guest state.)
15486 *
15487 * @returns Strict VBox status code.
15488 * @param pVCpu The cross context virtual CPU structure.
15489 * @param cbValue The size of the I/O port access (1, 2, or 4).
15490 * @param enmAddrMode The addressing mode.
15491 * @param fRepPrefix Indicates whether a repeat prefix is used
15492 * (doesn't matter which for this instruction).
15493 * @param cbInstr The instruction length in bytes.
15494 * @param fIoChecked Whether the access to the I/O port has been
15495 * checked or not. It's typically checked in the
15496 * HM scenario.
15497 */
15498VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15499 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15500{
15501 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15502
15503 /*
15504 * State init.
15505 */
15506 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15507
15508 /*
15509 * Switch orgy for getting to the right handler.
15510 */
15511 VBOXSTRICTRC rcStrict;
15512 if (fRepPrefix)
15513 {
15514 switch (enmAddrMode)
15515 {
15516 case IEMMODE_16BIT:
15517 switch (cbValue)
15518 {
15519 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15520 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15521 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15522 default:
15523 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15524 }
15525 break;
15526
15527 case IEMMODE_32BIT:
15528 switch (cbValue)
15529 {
15530 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15531 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15532 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15533 default:
15534 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15535 }
15536 break;
15537
15538 case IEMMODE_64BIT:
15539 switch (cbValue)
15540 {
15541 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15542 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15543 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15544 default:
15545 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15546 }
15547 break;
15548
15549 default:
15550 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15551 }
15552 }
15553 else
15554 {
15555 switch (enmAddrMode)
15556 {
15557 case IEMMODE_16BIT:
15558 switch (cbValue)
15559 {
15560 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15561 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15562 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15563 default:
15564 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15565 }
15566 break;
15567
15568 case IEMMODE_32BIT:
15569 switch (cbValue)
15570 {
15571 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15572 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15573 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15574 default:
15575 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15576 }
15577 break;
15578
15579 case IEMMODE_64BIT:
15580 switch (cbValue)
15581 {
15582 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15583 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15584 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15585 default:
15586 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15587 }
15588 break;
15589
15590 default:
15591 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15592 }
15593 }
15594
15595 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15596}
15597
15598
15599/**
15600 * Interface for raw-mode to execute an OUT instruction.
15601 *
15602 * @returns Strict VBox status code.
15603 * @param pVCpu The cross context virtual CPU structure.
15604 * @param cbInstr The instruction length in bytes.
15605 * @param u16Port The port to write to.
15606 * @param cbReg The register size.
15607 *
15608 * @remarks In ring-0 not all of the state needs to be synced in.
15609 */
15610VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15611{
15612 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15613 Assert(cbReg <= 4 && cbReg != 3);
15614
15615 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15616 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15617 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15618}
15619
15620
15621/**
15622 * Interface for raw-mode to execute an IN instruction.
15623 *
15624 * @returns Strict VBox status code.
15625 * @param pVCpu The cross context virtual CPU structure.
15626 * @param cbInstr The instruction length in bytes.
15627 * @param u16Port The port to read from.
15628 * @param cbReg The register size.
15629 */
15630VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15631{
15632 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15633 Assert(cbReg <= 4 && cbReg != 3);
15634
15635 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15636 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15637 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15638}
15639
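/*
 * Illustrative sketch (hypothetical wrapper, not part of this file): emulating
 * a single-byte port access ("out dx, al" / "in al, dx") via the two decoded
 * helpers above.  Both forms encode in one byte, which satisfies the length
 * assertion; the wrapper name is made up.
 */
#if 0
static VBOXSTRICTRC iemSketchPortIoByte(PVMCPU pVCpu, uint16_t u16Port, bool fWrite)
{
    if (fWrite)
        return IEMExecDecodedOut(pVCpu, 1 /* cbInstr */, u16Port, 1 /* cbReg */);
    return IEMExecDecodedIn(pVCpu, 1 /* cbInstr */, u16Port, 1 /* cbReg */);
}
#endif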
15640
15641/**
15642 * Interface for HM and EM to write to a CRx register.
15643 *
15644 * @returns Strict VBox status code.
15645 * @param pVCpu The cross context virtual CPU structure.
15646 * @param cbInstr The instruction length in bytes.
15647 * @param iCrReg The control register number (destination).
15648 * @param iGReg The general purpose register number (source).
15649 *
15650 * @remarks In ring-0 not all of the state needs to be synced in.
15651 */
15652VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15653{
15654 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15655 Assert(iCrReg < 16);
15656 Assert(iGReg < 16);
15657
15658 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15659 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15660 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15661}
15662
15663
15664/**
15665 * Interface for HM and EM to read from a CRx register.
15666 *
15667 * @returns Strict VBox status code.
15668 * @param pVCpu The cross context virtual CPU structure.
15669 * @param cbInstr The instruction length in bytes.
15670 * @param iGReg The general purpose register number (destination).
15671 * @param iCrReg The control register number (source).
15672 *
15673 * @remarks In ring-0 not all of the state needs to be synced in.
15674 */
15675VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15676{
15677 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15678 Assert(iCrReg < 16);
15679 Assert(iGReg < 16);
15680
15681 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15682 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15683 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15684}
15685
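/*
 * Illustrative sketch (hypothetical wrapper, not part of this file): letting
 * IEM emulate "mov rax, cr3" (0f 20 d8, three bytes) on behalf of HM/EM.  The
 * wrapper name is made up; general register 0 is xAX and control register 3
 * is CR3.
 */
#if 0
static VBOXSTRICTRC hmSketchReadCr3IntoRax(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxRead(pVCpu, 3 /* cbInstr */, 0 /* iGReg: xAX */, 3 /* iCrReg: CR3 */);
}
#endif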
15686
15687/**
15688 * Interface for HM and EM to clear the CR0[TS] bit.
15689 *
15690 * @returns Strict VBox status code.
15691 * @param pVCpu The cross context virtual CPU structure.
15692 * @param cbInstr The instruction length in bytes.
15693 *
15694 * @remarks In ring-0 not all of the state needs to be synced in.
15695 */
15696VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15697{
15698 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15699
15700 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15701 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15702 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15703}
15704
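/*
 * Illustrative sketch (hypothetical wrapper, not part of this file): clearing
 * CR0.TS on behalf of HM after a CLTS intercept.  CLTS encodes as 0f 06, i.e.
 * two bytes, which satisfies the length assertion above; the wrapper name is
 * made up.
 */
#if 0
static VBOXSTRICTRC hmSketchEmulateClts(PVMCPU pVCpu)
{
    return IEMExecDecodedClts(pVCpu, 2 /* cbInstr */);
}
#endif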
15705
15706/**
15707 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15708 *
15709 * @returns Strict VBox status code.
15710 * @param pVCpu The cross context virtual CPU structure.
15711 * @param cbInstr The instruction length in bytes.
15712 * @param uValue The value to load into CR0.
15713 *
15714 * @remarks In ring-0 not all of the state needs to be synced in.
15715 */
15716VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15717{
15718 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15719
15720 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15721 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15722 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15723}
15724
15725
15726/**
15727 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15728 *
15729 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15730 *
15731 * @returns Strict VBox status code.
15732 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15733 * @param cbInstr The instruction length in bytes.
15734 * @remarks In ring-0 not all of the state needs to be synced in.
15735 * @thread EMT(pVCpu)
15736 */
15737VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15738{
15739 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15740
15741 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15742 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15743 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15744}
15745
15746
15747/**
15748 * Checks if IEM is in the process of delivering an event (interrupt or
15749 * exception).
15750 *
15751 * @returns true if we're in the process of raising an interrupt or exception,
15752 * false otherwise.
15753 * @param pVCpu The cross context virtual CPU structure.
15754 * @param puVector Where to store the vector associated with the
15755 * currently delivered event, optional.
15756 * @param pfFlags Where to store the event delivery flags (see
15757 * IEM_XCPT_FLAGS_XXX), optional.
15758 * @param puErr Where to store the error code associated with the
15759 * event, optional.
15760 * @param puCr2 Where to store the CR2 associated with the event,
15761 * optional.
15762 * @remarks The caller should check the flags to determine if the error code and
15763 * CR2 are valid for the event.
15764 */
15765VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15766{
15767 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15768 if (fRaisingXcpt)
15769 {
15770 if (puVector)
15771 *puVector = pVCpu->iem.s.uCurXcpt;
15772 if (pfFlags)
15773 *pfFlags = pVCpu->iem.s.fCurXcpt;
15774 if (puErr)
15775 *puErr = pVCpu->iem.s.uCurXcptErr;
15776 if (puCr2)
15777 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15778 }
15779 return fRaisingXcpt;
15780}
15781
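/*
 * Illustrative sketch (hypothetical helper, not part of this file): querying
 * whether IEM is in the middle of delivering an event and logging the details.
 * The IEM_XCPT_FLAGS_ERR / IEM_XCPT_FLAGS_CR2 checks mirror the flag usage in
 * IEMInjectTrap above; the helper name is made up.
 */
#if 0
static void iemSketchLogPendingXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x fFlags=%#x uErr=%#x (valid=%RTbool) uCr2=%#RX64 (valid=%RTbool)\n",
             uVector, fFlags, uErr, RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR), uCr2, RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2)));
}
#endif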
15782
15783#ifdef VBOX_WITH_NESTED_HWVIRT
15784/**
15785 * Interface for HM and EM to emulate the CLGI instruction.
15786 *
15787 * @returns Strict VBox status code.
15788 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15789 * @param cbInstr The instruction length in bytes.
15790 * @thread EMT(pVCpu)
15791 */
15792VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15793{
15794 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15795
15796 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15797 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15798 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15799}
15800
15801
15802/**
15803 * Interface for HM and EM to emulate the STGI instruction.
15804 *
15805 * @returns Strict VBox status code.
15806 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15807 * @param cbInstr The instruction length in bytes.
15808 * @thread EMT(pVCpu)
15809 */
15810VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15811{
15812 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15813
15814 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15815 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15816 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15817}
15818
15819
15820/**
15821 * Interface for HM and EM to emulate the VMLOAD instruction.
15822 *
15823 * @returns Strict VBox status code.
15824 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15825 * @param cbInstr The instruction length in bytes.
15826 * @thread EMT(pVCpu)
15827 */
15828VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15829{
15830 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15831
15832 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15833 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15834 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15835}
15836
15837
15838/**
15839 * Interface for HM and EM to emulate the VMSAVE instruction.
15840 *
15841 * @returns Strict VBox status code.
15842 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15843 * @param cbInstr The instruction length in bytes.
15844 * @thread EMT(pVCpu)
15845 */
15846VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15847{
15848 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15849
15850 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15851 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15852 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15853}
15854
15855
15856/**
15857 * Interface for HM and EM to emulate the INVLPGA instruction.
15858 *
15859 * @returns Strict VBox status code.
15860 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15861 * @param cbInstr The instruction length in bytes.
15862 * @thread EMT(pVCpu)
15863 */
15864VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15865{
15866 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15867
15868 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15869 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15870 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15871}
15872#endif /* VBOX_WITH_NESTED_HWVIRT */
15873
15874#ifdef IN_RING3
15875
15876/**
15877 * Handles the unlikely and probably fatal merge cases.
15878 *
15879 * @returns Merged status code.
15880 * @param rcStrict Current EM status code.
15881 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15882 * with @a rcStrict.
15883 * @param iMemMap The memory mapping index. For error reporting only.
15884 * @param pVCpu The cross context virtual CPU structure of the calling
15885 * thread, for error reporting only.
15886 */
15887DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15888 unsigned iMemMap, PVMCPU pVCpu)
15889{
15890 if (RT_FAILURE_NP(rcStrict))
15891 return rcStrict;
15892
15893 if (RT_FAILURE_NP(rcStrictCommit))
15894 return rcStrictCommit;
15895
15896 if (rcStrict == rcStrictCommit)
15897 return rcStrictCommit;
15898
15899 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15900 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15901 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15902 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15903 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15904 return VERR_IOM_FF_STATUS_IPE;
15905}
15906
15907
15908/**
15909 * Helper for IOMR3ProcessForceFlag.
15910 *
15911 * @returns Merged status code.
15912 * @param rcStrict Current EM status code.
15913 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15914 * with @a rcStrict.
15915 * @param iMemMap The memory mapping index. For error reporting only.
15916 * @param pVCpu The cross context virtual CPU structure of the calling
15917 * thread, for error reporting only.
15918 */
15919DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15920{
15921 /* Simple. */
15922 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15923 return rcStrictCommit;
15924
15925 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15926 return rcStrict;
15927
15928 /* EM scheduling status codes. */
15929 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15930 && rcStrict <= VINF_EM_LAST))
15931 {
15932 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15933 && rcStrictCommit <= VINF_EM_LAST))
15934 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15935 }
15936
15937 /* Unlikely */
15938 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15939}
15940
15941
15942/**
15943 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15944 *
15945 * @returns Merge between @a rcStrict and what the commit operation returned.
15946 * @param pVM The cross context VM structure.
15947 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15948 * @param rcStrict The status code returned by ring-0 or raw-mode.
15949 */
15950VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15951{
15952 /*
15953 * Reset the pending commit.
15954 */
15955 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15956 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15957 ("%#x %#x %#x\n",
15958 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15959 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15960
15961 /*
15962 * Commit the pending bounce buffers (usually just one).
15963 */
15964 unsigned cBufs = 0;
15965 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15966 while (iMemMap-- > 0)
15967 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15968 {
15969 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15970 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15971 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15972
15973 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15974 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15975 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15976
15977 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15978 {
15979 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15980 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15981 pbBuf,
15982 cbFirst,
15983 PGMACCESSORIGIN_IEM);
15984 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15985 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15986 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15987 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15988 }
15989
15990 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15991 {
15992 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15993 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15994 pbBuf + cbFirst,
15995 cbSecond,
15996 PGMACCESSORIGIN_IEM);
15997 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15998 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15999 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16000 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16001 }
16002 cBufs++;
16003 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16004 }
16005
16006 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16007 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16008 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16009 pVCpu->iem.s.cActiveMappings = 0;
16010 return rcStrict;
16011}
16012
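/*
 * Illustrative sketch (hypothetical wrapper, not part of this file): how ring-3
 * force-flag processing might hand a pending IEM write commit on to
 * IEMR3ProcessForceFlag.  The wrapper name is made up, and the force-flag test
 * is assumed to use the same VMCPU_FF_IEM flag that the function above clears.
 */
#if 0
static VBOXSTRICTRC emR3SketchHandlePendingIemCommit(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif
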
16013#endif /* IN_RING3 */
16014