VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@66686

Last change on this file since 66686 was 66686, checked in by vboxsync, 8 years ago

VMM/IEM: Handle raising of exceptions during delivery of a previous exception or interrupt.
The code takes into account additional info. required by HM for handling recursive exceptions as well.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 608.0 KB
1/* $Id: IEMAll.cpp 66686 2017-04-27 12:38:17Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
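/* Illustrative example (not from the original file): a level-4 decode log line,
 * as described in the logging section above, would be emitted through the "IEM"
 * log group (LOG_GROUP_IEM, set further down). The mnemonic string and context
 * values here are hypothetical:
 *
 *     Log4(("decode - %04x:%08llx %s\n", pCtx->cs.Sel, pCtx->rip, "nop"));
 */
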
76/** @def IEM_VERIFICATION_MODE_MINIMAL
77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode
78 * context. */
79#if defined(DOXYGEN_RUNNING)
80# define IEM_VERIFICATION_MODE_MINIMAL
81#endif
82//#define IEM_LOG_MEMORY_WRITES
83#define IEM_IMPLEMENTS_TASKSWITCH
84
85/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
86#ifdef _MSC_VER
87# pragma warning(disable:4505)
88#endif
89
90
91/*********************************************************************************************************************************
92* Header Files *
93*********************************************************************************************************************************/
94#define LOG_GROUP LOG_GROUP_IEM
95#define VMCPU_INCL_CPUM_GST_CTX
96#include <VBox/vmm/iem.h>
97#include <VBox/vmm/cpum.h>
98#include <VBox/vmm/apic.h>
99#include <VBox/vmm/pdm.h>
100#include <VBox/vmm/pgm.h>
101#include <VBox/vmm/iom.h>
102#include <VBox/vmm/em.h>
103#include <VBox/vmm/hm.h>
104#ifdef VBOX_WITH_NESTED_HWVIRT
105# include <VBox/vmm/hm_svm.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#ifdef VBOX_WITH_RAW_MODE_NOT_R0
111# include <VBox/vmm/patm.h>
112# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
113# include <VBox/vmm/csam.h>
114# endif
115#endif
116#include "IEMInternal.h"
117#ifdef IEM_VERIFICATION_MODE_FULL
118# include <VBox/vmm/rem.h>
119# include <VBox/vmm/mm.h>
120#endif
121#include <VBox/vmm/vm.h>
122#include <VBox/log.h>
123#include <VBox/err.h>
124#include <VBox/param.h>
125#include <VBox/dis.h>
126#include <VBox/disopcode.h>
127#include <iprt/assert.h>
128#include <iprt/string.h>
129#include <iprt/x86.h>
130
131
132/*********************************************************************************************************************************
133* Structures and Typedefs *
134*********************************************************************************************************************************/
135/** @typedef PFNIEMOP
136 * Pointer to an opcode decoder function.
137 */
138
139/** @def FNIEMOP_DEF
140 * Define an opcode decoder function.
141 *
142 * We're using macros for this so that adding and removing parameters, as well as
143 * tweaking compiler-specific attributes, becomes easier. See FNIEMOP_CALL
144 *
145 * @param a_Name The function name.
146 */
147
148/** @typedef PFNIEMOPRM
149 * Pointer to an opcode decoder function with RM byte.
150 */
151
152/** @def FNIEMOPRM_DEF
153 * Define an opcode decoder function with RM byte.
154 *
155 * We're using macros for this so that adding and removing parameters, as well as
156 * tweaking compiler-specific attributes, becomes easier. See FNIEMOP_CALL_1
157 *
158 * @param a_Name The function name.
159 */
160
161#if defined(__GNUC__) && defined(RT_ARCH_X86)
162typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
163typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
164# define FNIEMOP_DEF(a_Name) \
165 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
166# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
167 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
168# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
169 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
170
171#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
172typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
173typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
174# define FNIEMOP_DEF(a_Name) \
175 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
176# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
177 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
178# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
179 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
180
181#elif defined(__GNUC__)
182typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
183typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
184# define FNIEMOP_DEF(a_Name) \
185 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
186# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
187 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
188# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
189 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
190
191#else
192typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
193typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
194# define FNIEMOP_DEF(a_Name) \
195 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
196# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
197 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
198# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
199 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
200
201#endif
202#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
203
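/* Illustrative sketch (not part of the original file): a decoder stub defined
 * with FNIEMOP_DEF and dispatched through FNIEMOP_CALL (defined further down).
 * The opcode name is hypothetical; real decoders live in the IEMAllInstructions
 * template files.
 *
 *     FNIEMOP_DEF(iemOp_InvalidExample)
 *     {
 *         return iemRaiseUndefinedOpcode(pVCpu);  // pVCpu is the parameter declared by the macro
 *     }
 *
 *     // In the decode loop, assuming bOpcode holds the fetched opcode byte:
 *     //     VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
 */
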
204
205/**
206 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
207 */
208typedef union IEMSELDESC
209{
210 /** The legacy view. */
211 X86DESC Legacy;
212 /** The long mode view. */
213 X86DESC64 Long;
214} IEMSELDESC;
215/** Pointer to a selector descriptor table entry. */
216typedef IEMSELDESC *PIEMSELDESC;
217
218/**
219 * CPU exception classes.
220 */
221typedef enum IEMXCPTCLASS
222{
223 IEMXCPTCLASS_BENIGN,
224 IEMXCPTCLASS_CONTRIBUTORY,
225 IEMXCPTCLASS_PAGE_FAULT
226} IEMXCPTCLASS;
227
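/* Illustrative sketch (simplified, not the actual IEM logic): the classes above
 * combine roughly as the architectural double-fault rules describe -- a benign
 * first exception never escalates, two contributory exceptions escalate to \#DF,
 * and a page fault followed by a contributory exception or another page fault
 * escalates to \#DF as well.
 *
 *     static bool iemExampleXcptEscalatesToDoubleFault(IEMXCPTCLASS enmFirst, IEMXCPTCLASS enmSecond)
 *     {
 *         if (enmFirst == IEMXCPTCLASS_CONTRIBUTORY)
 *             return enmSecond == IEMXCPTCLASS_CONTRIBUTORY;
 *         if (enmFirst == IEMXCPTCLASS_PAGE_FAULT)
 *             return enmSecond == IEMXCPTCLASS_CONTRIBUTORY || enmSecond == IEMXCPTCLASS_PAGE_FAULT;
 *         return false;   // benign first exception: the second is delivered normally
 *     }
 */
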
228
229/*********************************************************************************************************************************
230* Defined Constants And Macros *
231*********************************************************************************************************************************/
232/** @def IEM_WITH_SETJMP
233 * Enables alternative status code handling using setjmps.
234 *
235 * This adds a bit of expense via the setjmp() call since it saves all the
236 * non-volatile registers. However, it eliminates return code checks and allows
237 * for more optimal return value passing (return regs instead of stack buffer).
238 */
239#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
240# define IEM_WITH_SETJMP
241#endif
242
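/* Conceptual sketch (helper names as used elsewhere in this file; treat as
 * illustrative): with IEM_WITH_SETJMP an opcode/memory fetch reports failure by
 * longjmp'ing back to the instruction dispatcher instead of returning a
 * VBOXSTRICTRC, so the common case needs no status check:
 *
 *     // Status-code style:
 *     uint8_t bRm; VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8(pVCpu, &bRm);
 *     if (rcStrict != VINF_SUCCESS) return rcStrict;
 *
 *     // setjmp style:
 *     uint8_t bRm = iemOpcodeGetNextU8Jmp(pVCpu);
 */
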
243/** Temporary hack to disable the double execution. Will be removed in favor
244 * of a dedicated execution mode in EM. */
245//#define IEM_VERIFICATION_MODE_NO_REM
246
247/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
248 * due to GCC lacking knowledge about the value range of a switch. */
249#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
250
251/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
252#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
253
254/**
255 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
256 * occasion.
257 */
258#ifdef LOG_ENABLED
259# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
260 do { \
261 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
262 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
263 } while (0)
264#else
265# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
266 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
267#endif
268
269/**
270 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
271 * occasion using the supplied logger statement.
272 *
273 * @param a_LoggerArgs What to log on failure.
274 */
275#ifdef LOG_ENABLED
276# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
277 do { \
278 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
279 /*LogFunc(a_LoggerArgs);*/ \
280 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
281 } while (0)
282#else
283# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
284 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
285#endif
286
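/* Usage sketch (hypothetical call site): bail out of a code path IEM does not
 * implement yet, logging the relevant state in debug builds:
 *
 *     if (fSomeUnimplementedCase)   // hypothetical condition
 *         IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("uSel=%#x\n", uSel));
 */
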
287/**
288 * Call an opcode decoder function.
289 *
290 * We're using macros for this so that adding and removing parameters can be
291 * done as we please. See FNIEMOP_DEF.
292 */
293#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
294
295/**
296 * Call a common opcode decoder function taking one extra argument.
297 *
298 * We're using macros for this so that adding and removing parameters can be
299 * done as we please. See FNIEMOP_DEF_1.
300 */
301#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
302
303/**
304 * Call a common opcode decoder function taking two extra arguments.
305 *
306 * We're using macros for this so that adding and removing parameters can be
307 * done as we please. See FNIEMOP_DEF_2.
308 */
309#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
310
311/**
312 * Check if we're currently executing in real or virtual 8086 mode.
313 *
314 * @returns @c true if it is, @c false if not.
315 * @param a_pVCpu The IEM state of the current CPU.
316 */
317#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
318
319/**
320 * Check if we're currently executing in virtual 8086 mode.
321 *
322 * @returns @c true if it is, @c false if not.
323 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
324 */
325#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
326
327/**
328 * Check if we're currently executing in long mode.
329 *
330 * @returns @c true if it is, @c false if not.
331 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
332 */
333#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
334
335/**
336 * Check if we're currently executing in real mode.
337 *
338 * @returns @c true if it is, @c false if not.
339 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
340 */
341#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
342
343/**
344 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
345 * @returns PCCPUMFEATURES
346 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
347 */
348#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
349
350/**
351 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
352 * @returns PCCPUMFEATURES
353 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
354 */
355#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
356
357/**
358 * Evaluates to true if we're presenting an Intel CPU to the guest.
359 */
360#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
361
362/**
363 * Evaluates to true if we're presenting an AMD CPU to the guest.
364 */
365#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
366
367/**
368 * Check if the address is canonical.
369 */
370#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
371
372/** @def IEM_USE_UNALIGNED_DATA_ACCESS
373 * Use unaligned accesses instead of elaborate byte assembly. */
374#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
375# define IEM_USE_UNALIGNED_DATA_ACCESS
376#endif
377
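/* Usage sketch (hypothetical decoder fragment; the feature flag name is an
 * assumption about CPUMFEATURES): gate an instruction on a guest CPU feature
 * and on the current execution mode using the macros above:
 *
 *     if (   !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2
 *         || IEM_IS_REAL_OR_V86_MODE(pVCpu))
 *         return iemRaiseUndefinedOpcode(pVCpu);
 */
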
378#ifdef VBOX_WITH_NESTED_HWVIRT
379/**
380 * Check the common SVM instruction preconditions.
381 */
382# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
383 do { \
384 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
385 { \
386 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
387 return iemRaiseUndefinedOpcode(pVCpu); \
388 } \
389 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
390 { \
391 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
392 return iemRaiseUndefinedOpcode(pVCpu); \
393 } \
394 if (pVCpu->iem.s.uCpl != 0) \
395 { \
396 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
397 return iemRaiseGeneralProtectionFault0(pVCpu); \
398 } \
399 } while (0)
400
401/**
402 * Check if SVM is enabled.
403 */
404# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
405
406/**
407 * Check if an SVM control/instruction intercept is set.
408 */
409# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(IEM_GET_CTX(a_pVCpu), (a_Intercept)))
410
411/**
412 * Check if an SVM read CRx intercept is set.
413 */
414# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
415
416/**
417 * Check if an SVM write CRx intercept is set.
418 */
419# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uCr)))
420
421/**
422 * Check if an SVM read DRx intercept is set.
423 */
424# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
425
426/**
427 * Check if an SVM write DRx intercept is set.
428 */
429# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uDr)))
430
431/**
432 * Check if an SVM exception intercept is set.
433 */
434# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(IEM_GET_CTX(a_pVCpu), (a_uVector)))
435
436/**
437 * Invokes the SVM \#VMEXIT handler for the nested-guest.
438 */
439# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
440 do \
441 { \
442 VBOXSTRICTRC rcStrictVmExit = HMSvmNstGstVmExit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), \
443 (a_uExitInfo2)); \
444 return rcStrictVmExit == VINF_SVM_VMEXIT ? VINF_SUCCESS : rcStrictVmExit; \
445 } while (0)
446
447/**
448 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
449 * corresponding decode assist information.
450 */
451# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
452 do \
453 { \
454 uint64_t uExitInfo1; \
455 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssist \
456 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
457 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
458 else \
459 uExitInfo1 = 0; \
460 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
461 } while (0)
462
463/**
464 * Checks and handles an SVM MSR intercept.
465 */
466# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) \
467 HMSvmNstGstHandleMsrIntercept((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_idMsr), (a_fWrite))
468
469#else
470# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
471# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
472# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
473# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
474# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
475# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
476# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
477# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
478# define IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
479# define IEM_RETURN_SVM_NST_GST_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
480# define IEM_SVM_NST_GST_MSR_INTERCEPT(a_pVCpu, a_idMsr, a_fWrite) (VERR_SVM_IPE_1)
481
482#endif /* VBOX_WITH_NESTED_HWVIRT */
483
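/* Usage sketch (hypothetical instruction handler; the intercept and exit-code
 * constants are assumed from hm_svm.h): an SVM instruction typically runs the
 * common checks first, then raises a nested-guest \#VMEXIT if the corresponding
 * control intercept is set:
 *
 *     IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmmcall);
 *     if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMMCALL))
 *         IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_VMMCALL, 0, 0);   // uExitInfo1=0, uExitInfo2=0
 */
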
484
485/*********************************************************************************************************************************
486* Global Variables *
487*********************************************************************************************************************************/
488extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
489
490
491/** Function table for the ADD instruction. */
492IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
493{
494 iemAImpl_add_u8, iemAImpl_add_u8_locked,
495 iemAImpl_add_u16, iemAImpl_add_u16_locked,
496 iemAImpl_add_u32, iemAImpl_add_u32_locked,
497 iemAImpl_add_u64, iemAImpl_add_u64_locked
498};
499
500/** Function table for the ADC instruction. */
501IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
502{
503 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
504 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
505 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
506 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
507};
508
509/** Function table for the SUB instruction. */
510IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
511{
512 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
513 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
514 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
515 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
516};
517
518/** Function table for the SBB instruction. */
519IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
520{
521 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
522 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
523 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
524 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
525};
526
527/** Function table for the OR instruction. */
528IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
529{
530 iemAImpl_or_u8, iemAImpl_or_u8_locked,
531 iemAImpl_or_u16, iemAImpl_or_u16_locked,
532 iemAImpl_or_u32, iemAImpl_or_u32_locked,
533 iemAImpl_or_u64, iemAImpl_or_u64_locked
534};
535
536/** Function table for the XOR instruction. */
537IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
538{
539 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
540 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
541 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
542 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
543};
544
545/** Function table for the AND instruction. */
546IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
547{
548 iemAImpl_and_u8, iemAImpl_and_u8_locked,
549 iemAImpl_and_u16, iemAImpl_and_u16_locked,
550 iemAImpl_and_u32, iemAImpl_and_u32_locked,
551 iemAImpl_and_u64, iemAImpl_and_u64_locked
552};
553
554/** Function table for the CMP instruction.
555 * @remarks Making operand order ASSUMPTIONS.
556 */
557IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
558{
559 iemAImpl_cmp_u8, NULL,
560 iemAImpl_cmp_u16, NULL,
561 iemAImpl_cmp_u32, NULL,
562 iemAImpl_cmp_u64, NULL
563};
564
565/** Function table for the TEST instruction.
566 * @remarks Making operand order ASSUMPTIONS.
567 */
568IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
569{
570 iemAImpl_test_u8, NULL,
571 iemAImpl_test_u16, NULL,
572 iemAImpl_test_u32, NULL,
573 iemAImpl_test_u64, NULL
574};
575
576/** Function table for the BT instruction. */
577IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
578{
579 NULL, NULL,
580 iemAImpl_bt_u16, NULL,
581 iemAImpl_bt_u32, NULL,
582 iemAImpl_bt_u64, NULL
583};
584
585/** Function table for the BTC instruction. */
586IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
587{
588 NULL, NULL,
589 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
590 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
591 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
592};
593
594/** Function table for the BTR instruction. */
595IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
596{
597 NULL, NULL,
598 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
599 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
600 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
601};
602
603/** Function table for the BTS instruction. */
604IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
605{
606 NULL, NULL,
607 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
608 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
609 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
610};
611
612/** Function table for the BSF instruction. */
613IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
614{
615 NULL, NULL,
616 iemAImpl_bsf_u16, NULL,
617 iemAImpl_bsf_u32, NULL,
618 iemAImpl_bsf_u64, NULL
619};
620
621/** Function table for the BSR instruction. */
622IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
623{
624 NULL, NULL,
625 iemAImpl_bsr_u16, NULL,
626 iemAImpl_bsr_u32, NULL,
627 iemAImpl_bsr_u64, NULL
628};
629
630/** Function table for the IMUL instruction. */
631IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
632{
633 NULL, NULL,
634 iemAImpl_imul_two_u16, NULL,
635 iemAImpl_imul_two_u32, NULL,
636 iemAImpl_imul_two_u64, NULL
637};
638
639/** Group 1 /r lookup table. */
640IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
641{
642 &g_iemAImpl_add,
643 &g_iemAImpl_or,
644 &g_iemAImpl_adc,
645 &g_iemAImpl_sbb,
646 &g_iemAImpl_and,
647 &g_iemAImpl_sub,
648 &g_iemAImpl_xor,
649 &g_iemAImpl_cmp
650};
651
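/* Usage sketch (hypothetical decoder fragment; ModR/M helper macros from
 * iprt/x86.h): the group 1 opcodes (0x80..0x83) pick their worker from this
 * table using the ModR/M reg field, and the selected entry then supplies the
 * size-specific normal and locked workers:
 *
 *     PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 */
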
652/** Function table for the INC instruction. */
653IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
654{
655 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
656 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
657 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
658 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
659};
660
661/** Function table for the DEC instruction. */
662IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
663{
664 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
665 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
666 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
667 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
668};
669
670/** Function table for the NEG instruction. */
671IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
672{
673 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
674 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
675 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
676 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
677};
678
679/** Function table for the NOT instruction. */
680IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
681{
682 iemAImpl_not_u8, iemAImpl_not_u8_locked,
683 iemAImpl_not_u16, iemAImpl_not_u16_locked,
684 iemAImpl_not_u32, iemAImpl_not_u32_locked,
685 iemAImpl_not_u64, iemAImpl_not_u64_locked
686};
687
688
689/** Function table for the ROL instruction. */
690IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
691{
692 iemAImpl_rol_u8,
693 iemAImpl_rol_u16,
694 iemAImpl_rol_u32,
695 iemAImpl_rol_u64
696};
697
698/** Function table for the ROR instruction. */
699IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
700{
701 iemAImpl_ror_u8,
702 iemAImpl_ror_u16,
703 iemAImpl_ror_u32,
704 iemAImpl_ror_u64
705};
706
707/** Function table for the RCL instruction. */
708IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
709{
710 iemAImpl_rcl_u8,
711 iemAImpl_rcl_u16,
712 iemAImpl_rcl_u32,
713 iemAImpl_rcl_u64
714};
715
716/** Function table for the RCR instruction. */
717IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
718{
719 iemAImpl_rcr_u8,
720 iemAImpl_rcr_u16,
721 iemAImpl_rcr_u32,
722 iemAImpl_rcr_u64
723};
724
725/** Function table for the SHL instruction. */
726IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
727{
728 iemAImpl_shl_u8,
729 iemAImpl_shl_u16,
730 iemAImpl_shl_u32,
731 iemAImpl_shl_u64
732};
733
734/** Function table for the SHR instruction. */
735IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
736{
737 iemAImpl_shr_u8,
738 iemAImpl_shr_u16,
739 iemAImpl_shr_u32,
740 iemAImpl_shr_u64
741};
742
743/** Function table for the SAR instruction. */
744IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
745{
746 iemAImpl_sar_u8,
747 iemAImpl_sar_u16,
748 iemAImpl_sar_u32,
749 iemAImpl_sar_u64
750};
751
752
753/** Function table for the MUL instruction. */
754IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
755{
756 iemAImpl_mul_u8,
757 iemAImpl_mul_u16,
758 iemAImpl_mul_u32,
759 iemAImpl_mul_u64
760};
761
762/** Function table for the IMUL instruction working implicitly on rAX. */
763IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
764{
765 iemAImpl_imul_u8,
766 iemAImpl_imul_u16,
767 iemAImpl_imul_u32,
768 iemAImpl_imul_u64
769};
770
771/** Function table for the DIV instruction. */
772IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
773{
774 iemAImpl_div_u8,
775 iemAImpl_div_u16,
776 iemAImpl_div_u32,
777 iemAImpl_div_u64
778};
779
780/** Function table for the IDIV instruction. */
781IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
782{
783 iemAImpl_idiv_u8,
784 iemAImpl_idiv_u16,
785 iemAImpl_idiv_u32,
786 iemAImpl_idiv_u64
787};
788
789/** Function table for the SHLD instruction */
790IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
791{
792 iemAImpl_shld_u16,
793 iemAImpl_shld_u32,
794 iemAImpl_shld_u64,
795};
796
797/** Function table for the SHRD instruction */
798IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
799{
800 iemAImpl_shrd_u16,
801 iemAImpl_shrd_u32,
802 iemAImpl_shrd_u64,
803};
804
805
806/** Function table for the PUNPCKLBW instruction */
807IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
808/** Function table for the PUNPCKLWD instruction */
809IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
810/** Function table for the PUNPCKLDQ instruction */
811IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
812/** Function table for the PUNPCKLQDQ instruction */
813IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
814
815/** Function table for the PUNPCKHBW instruction */
816IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
817/** Function table for the PUNPCKHWD instruction */
818IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
819/** Function table for the PUNPCKHDQ instruction */
820IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
821/** Function table for the PUNPCKHQDQ instruction */
822IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
823
824/** Function table for the PXOR instruction */
825IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
826/** Function table for the PCMPEQB instruction */
827IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
828/** Function table for the PCMPEQW instruction */
829IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
830/** Function table for the PCMPEQD instruction */
831IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
832
833
834#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
835/** What IEM just wrote. */
836uint8_t g_abIemWrote[256];
837/** How much IEM just wrote. */
838size_t g_cbIemWrote;
839#endif
840
841
842/*********************************************************************************************************************************
843* Internal Functions *
844*********************************************************************************************************************************/
845IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
846IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
847IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
848IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
849/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
850IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
851IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
852IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
853IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
854IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
855IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
856IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
857IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
858IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
859IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
860IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
861IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
862#ifdef IEM_WITH_SETJMP
863DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
864DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
865DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
866DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
867DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
868#endif
869
870IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
871IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
872IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
873IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
874IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
875IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
876IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
877IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
878IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
879IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
880IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
881IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
882IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
883IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
884IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
885IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
886
887#if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)
888IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);
889#endif
890IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);
891IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
892
893#ifdef VBOX_WITH_NESTED_HWVIRT
894/**
895 * Checks if the intercepted IO instruction causes a \#VMEXIT and handles it
896 * accordingly.
897 *
898 * @returns VBox strict status code.
899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
900 * @param u16Port The IO port being accessed.
901 * @param enmIoType The type of IO access.
902 * @param cbReg The IO operand size in bytes.
903 * @param cAddrSizeBits The address size in bits (16, 32 or 64).
904 * @param iEffSeg The effective segment number.
905 * @param fRep Whether this is a repeating IO instruction (REP prefix).
906 * @param fStrIo Whether this is a string IO instruction.
907 * @param cbInstr The length of the IO instruction in bytes.
908 *
909 * @remarks This must be called only when IO instructions are intercepted by the
910 * nested-guest hypervisor.
911 */
912IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
913 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
914{
915 Assert(IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IOIO_PROT));
916 Assert(cAddrSizeBits == 16 || cAddrSizeBits == 32 || cAddrSizeBits == 64);
917 Assert(cbReg == 1 || cbReg == 2 || cbReg == 4 || cbReg == 8);
918
919 static const uint32_t s_auIoOpSize[] = { SVM_IOIO_32_BIT_OP, SVM_IOIO_8_BIT_OP, SVM_IOIO_16_BIT_OP, 0, SVM_IOIO_32_BIT_OP, 0, 0, 0 };
920 static const uint32_t s_auIoAddrSize[] = { 0, SVM_IOIO_16_BIT_ADDR, SVM_IOIO_32_BIT_ADDR, 0, SVM_IOIO_64_BIT_ADDR, 0, 0, 0 };
921
922 SVMIOIOEXITINFO IoExitInfo;
923 IoExitInfo.u = s_auIoOpSize[cbReg & 7];
924 IoExitInfo.u |= s_auIoAddrSize[(cAddrSizeBits >> 4) & 7];
925 IoExitInfo.n.u1STR = fStrIo;
926 IoExitInfo.n.u1REP = fRep;
927 IoExitInfo.n.u3SEG = iEffSeg & 0x7;
928 IoExitInfo.n.u1Type = enmIoType;
929 IoExitInfo.n.u16Port = u16Port;
930
931 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
932 return HMSvmNstGstHandleIOIntercept(pVCpu, pCtx, &IoExitInfo, pCtx->rip + cbInstr);
933}
934
935#else
936IEM_STATIC VBOXSTRICTRC iemSvmHandleIOIntercept(PVMCPU pVCpu, uint16_t u16Port, SVMIOIOTYPE enmIoType, uint8_t cbReg,
937 uint8_t cAddrSizeBits, uint8_t iEffSeg, bool fRep, bool fStrIo, uint8_t cbInstr)
938{
939 RT_NOREF9(pVCpu, u16Port, enmIoType, cbReg, cAddrSizeBits, iEffSeg, fRep, fStrIo, cbInstr);
940 return VERR_IEM_IPE_9;
941}
942#endif /* VBOX_WITH_NESTED_HWVIRT */
943
944
945/**
946 * Sets the pass up status.
947 *
948 * @returns VINF_SUCCESS.
949 * @param pVCpu The cross context virtual CPU structure of the
950 * calling thread.
951 * @param rcPassUp The pass up status. Must be informational.
952 * VINF_SUCCESS is not allowed.
953 */
954IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
955{
956 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
957
958 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
959 if (rcOldPassUp == VINF_SUCCESS)
960 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
961 /* If both are EM scheduling codes, use EM priority rules. */
962 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
963 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
964 {
965 if (rcPassUp < rcOldPassUp)
966 {
967 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
968 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
969 }
970 else
971 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
972 }
973 /* Override EM scheduling with specific status code. */
974 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
975 {
976 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
977 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
978 }
979 /* Don't override specific status code, first come first served. */
980 else
981 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
982 return VINF_SUCCESS;
983}
984
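/* Usage sketch (mirrors the call sites later in this file): an informational
 * status from a physical read/write is recorded for the caller and execution
 * continues as if it had succeeded:
 *
 *     if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
 */
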
985
986/**
987 * Calculates the CPU mode.
988 *
989 * This is mainly for updating IEMCPU::enmCpuMode.
990 *
991 * @returns CPU mode.
992 * @param pCtx The register context for the CPU.
993 */
994DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
995{
996 if (CPUMIsGuestIn64BitCodeEx(pCtx))
997 return IEMMODE_64BIT;
998 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
999 return IEMMODE_32BIT;
1000 return IEMMODE_16BIT;
1001}
1002
1003
1004/**
1005 * Initializes the execution state.
1006 *
1007 * @param pVCpu The cross context virtual CPU structure of the
1008 * calling thread.
1009 * @param fBypassHandlers Whether to bypass access handlers.
1010 *
1011 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1012 * side-effects in strict builds.
1013 */
1014DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1015{
1016 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1017
1018 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1019
1020#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1021 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1022 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1023 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1024 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1025 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1026 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1027 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1028 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1029#endif
1030
1031#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1032 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1033#endif
1034 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1035 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
1036#ifdef VBOX_STRICT
1037 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1038 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1039 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1040 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1041 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1042 pVCpu->iem.s.uRexReg = 127;
1043 pVCpu->iem.s.uRexB = 127;
1044 pVCpu->iem.s.uRexIndex = 127;
1045 pVCpu->iem.s.iEffSeg = 127;
1046 pVCpu->iem.s.idxPrefix = 127;
1047 pVCpu->iem.s.uVex3rdReg = 127;
1048 pVCpu->iem.s.uVexLength = 127;
1049 pVCpu->iem.s.fEvexStuff = 127;
1050 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1051# ifdef IEM_WITH_CODE_TLB
1052 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1053 pVCpu->iem.s.pbInstrBuf = NULL;
1054 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1055 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1056 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1057 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1058# else
1059 pVCpu->iem.s.offOpcode = 127;
1060 pVCpu->iem.s.cbOpcode = 127;
1061# endif
1062#endif
1063
1064 pVCpu->iem.s.cActiveMappings = 0;
1065 pVCpu->iem.s.iNextMapping = 0;
1066 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1067 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1068#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1069 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1070 && pCtx->cs.u64Base == 0
1071 && pCtx->cs.u32Limit == UINT32_MAX
1072 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1073 if (!pVCpu->iem.s.fInPatchCode)
1074 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1075#endif
1076
1077#ifdef IEM_VERIFICATION_MODE_FULL
1078 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;
1079 pVCpu->iem.s.fNoRem = true;
1080#endif
1081}
1082
1083
1084/**
1085 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1086 *
1087 * @param pVCpu The cross context virtual CPU structure of the
1088 * calling thread.
1089 */
1090DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1091{
1092 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1093#ifdef IEM_VERIFICATION_MODE_FULL
1094 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;
1095#endif
1096#ifdef VBOX_STRICT
1097# ifdef IEM_WITH_CODE_TLB
1098 NOREF(pVCpu);
1099# else
1100 pVCpu->iem.s.cbOpcode = 0;
1101# endif
1102#else
1103 NOREF(pVCpu);
1104#endif
1105}
1106
1107
1108/**
1109 * Initializes the decoder state.
1110 *
1111 * iemReInitDecoder is mostly a copy of this function.
1112 *
1113 * @param pVCpu The cross context virtual CPU structure of the
1114 * calling thread.
1115 * @param fBypassHandlers Whether to bypass access handlers.
1116 */
1117DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1118{
1119 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1120
1121 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1122
1123#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1124 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1125 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1126 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1127 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1128 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1129 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1130 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1131 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1132#endif
1133
1134#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1135 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1136#endif
1137 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1138#ifdef IEM_VERIFICATION_MODE_FULL
1139 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1140 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1141#endif
1142 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1143 pVCpu->iem.s.enmCpuMode = enmMode;
1144 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1145 pVCpu->iem.s.enmEffAddrMode = enmMode;
1146 if (enmMode != IEMMODE_64BIT)
1147 {
1148 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1149 pVCpu->iem.s.enmEffOpSize = enmMode;
1150 }
1151 else
1152 {
1153 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1154 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1155 }
1156 pVCpu->iem.s.fPrefixes = 0;
1157 pVCpu->iem.s.uRexReg = 0;
1158 pVCpu->iem.s.uRexB = 0;
1159 pVCpu->iem.s.uRexIndex = 0;
1160 pVCpu->iem.s.idxPrefix = 0;
1161 pVCpu->iem.s.uVex3rdReg = 0;
1162 pVCpu->iem.s.uVexLength = 0;
1163 pVCpu->iem.s.fEvexStuff = 0;
1164 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1165#ifdef IEM_WITH_CODE_TLB
1166 pVCpu->iem.s.pbInstrBuf = NULL;
1167 pVCpu->iem.s.offInstrNextByte = 0;
1168 pVCpu->iem.s.offCurInstrStart = 0;
1169# ifdef VBOX_STRICT
1170 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1171 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1172 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1173# endif
1174#else
1175 pVCpu->iem.s.offOpcode = 0;
1176 pVCpu->iem.s.cbOpcode = 0;
1177#endif
1178 pVCpu->iem.s.cActiveMappings = 0;
1179 pVCpu->iem.s.iNextMapping = 0;
1180 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1181 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1182#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1183 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1184 && pCtx->cs.u64Base == 0
1185 && pCtx->cs.u32Limit == UINT32_MAX
1186 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1187 if (!pVCpu->iem.s.fInPatchCode)
1188 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1189#endif
1190
1191#ifdef DBGFTRACE_ENABLED
1192 switch (enmMode)
1193 {
1194 case IEMMODE_64BIT:
1195 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1196 break;
1197 case IEMMODE_32BIT:
1198 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1199 break;
1200 case IEMMODE_16BIT:
1201 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1202 break;
1203 }
1204#endif
1205}
1206
1207
1208/**
1209 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1210 *
1211 * This is mostly a copy of iemInitDecoder.
1212 *
1213 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1214 */
1215DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1216{
1217 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1218
1219 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1220
1221#if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))
1222 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1223 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1224 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1226 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1227 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1228 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1229 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1230#endif
1231
1232 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1233#ifdef IEM_VERIFICATION_MODE_FULL
1234 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)
1235 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;
1236#endif
1237 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1238 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1239 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1240 pVCpu->iem.s.enmEffAddrMode = enmMode;
1241 if (enmMode != IEMMODE_64BIT)
1242 {
1243 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1244 pVCpu->iem.s.enmEffOpSize = enmMode;
1245 }
1246 else
1247 {
1248 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1249 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1250 }
1251 pVCpu->iem.s.fPrefixes = 0;
1252 pVCpu->iem.s.uRexReg = 0;
1253 pVCpu->iem.s.uRexB = 0;
1254 pVCpu->iem.s.uRexIndex = 0;
1255 pVCpu->iem.s.idxPrefix = 0;
1256 pVCpu->iem.s.uVex3rdReg = 0;
1257 pVCpu->iem.s.uVexLength = 0;
1258 pVCpu->iem.s.fEvexStuff = 0;
1259 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1260#ifdef IEM_WITH_CODE_TLB
1261 if (pVCpu->iem.s.pbInstrBuf)
1262 {
1263 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1264 - pVCpu->iem.s.uInstrBufPc;
1265 if (off < pVCpu->iem.s.cbInstrBufTotal)
1266 {
1267 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1268 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1269 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1270 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1271 else
1272 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1273 }
1274 else
1275 {
1276 pVCpu->iem.s.pbInstrBuf = NULL;
1277 pVCpu->iem.s.offInstrNextByte = 0;
1278 pVCpu->iem.s.offCurInstrStart = 0;
1279 pVCpu->iem.s.cbInstrBuf = 0;
1280 pVCpu->iem.s.cbInstrBufTotal = 0;
1281 }
1282 }
1283 else
1284 {
1285 pVCpu->iem.s.offInstrNextByte = 0;
1286 pVCpu->iem.s.offCurInstrStart = 0;
1287 pVCpu->iem.s.cbInstrBuf = 0;
1288 pVCpu->iem.s.cbInstrBufTotal = 0;
1289 }
1290#else
1291 pVCpu->iem.s.cbOpcode = 0;
1292 pVCpu->iem.s.offOpcode = 0;
1293#endif
1294 Assert(pVCpu->iem.s.cActiveMappings == 0);
1295 pVCpu->iem.s.iNextMapping = 0;
1296 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1297 Assert(pVCpu->iem.s.fBypassHandlers == false);
1298#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1299 if (!pVCpu->iem.s.fInPatchCode)
1300 { /* likely */ }
1301 else
1302 {
1303 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1304 && pCtx->cs.u64Base == 0
1305 && pCtx->cs.u32Limit == UINT32_MAX
1306 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1307 if (!pVCpu->iem.s.fInPatchCode)
1308 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1309 }
1310#endif
1311
1312#ifdef DBGFTRACE_ENABLED
1313 switch (enmMode)
1314 {
1315 case IEMMODE_64BIT:
1316 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1317 break;
1318 case IEMMODE_32BIT:
1319 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1320 break;
1321 case IEMMODE_16BIT:
1322 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1323 break;
1324 }
1325#endif
1326}
1327
1328
1329
1330/**
1331 * Prefetches opcodes the first time execution is started.
1332 *
1333 * @returns Strict VBox status code.
1334 * @param pVCpu The cross context virtual CPU structure of the
1335 * calling thread.
1336 * @param fBypassHandlers Whether to bypass access handlers.
1337 */
1338IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1339{
1340#ifdef IEM_VERIFICATION_MODE_FULL
1341 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;
1342#endif
1343 iemInitDecoder(pVCpu, fBypassHandlers);
1344
1345#ifdef IEM_WITH_CODE_TLB
1346 /** @todo Do ITLB lookup here. */
1347
1348#else /* !IEM_WITH_CODE_TLB */
1349
1350 /*
1351 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1352 *
1353 * First translate CS:rIP to a physical address.
1354 */
1355 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1356 uint32_t cbToTryRead;
1357 RTGCPTR GCPtrPC;
1358 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1359 {
1360 cbToTryRead = PAGE_SIZE;
1361 GCPtrPC = pCtx->rip;
1362 if (IEM_IS_CANONICAL(GCPtrPC))
1363 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1364 else
1365 return iemRaiseGeneralProtectionFault0(pVCpu);
1366 }
1367 else
1368 {
1369 uint32_t GCPtrPC32 = pCtx->eip;
1370 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1371 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1372 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1373 else
1374 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1375 if (cbToTryRead) { /* likely */ }
1376 else /* overflowed */
1377 {
1378 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1379 cbToTryRead = UINT32_MAX;
1380 }
1381 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1382 Assert(GCPtrPC <= UINT32_MAX);
1383 }
1384
1385# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1386 /* Allow interpretation of patch manager code blocks since they can for
1387 instance throw #PFs for perfectly good reasons. */
1388 if (pVCpu->iem.s.fInPatchCode)
1389 {
1390 size_t cbRead = 0;
1391 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1392 AssertRCReturn(rc, rc);
1393 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1394 return VINF_SUCCESS;
1395 }
1396# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1397
1398 RTGCPHYS GCPhys;
1399 uint64_t fFlags;
1400 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1401 if (RT_SUCCESS(rc)) { /* probable */ }
1402 else
1403 {
1404 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1405 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1406 }
1407 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1408 else
1409 {
1410 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1411 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1412 }
1413 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1414 else
1415 {
1416 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1417 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1418 }
1419 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1420 /** @todo Check reserved bits and such stuff. PGM is better at doing
1421 * that, so do it when implementing the guest virtual address
1422 * TLB... */
1423
1424# ifdef IEM_VERIFICATION_MODE_FULL
1425 /*
1426 * Optimistic optimization: Use unconsumed opcode bytes from the previous
1427 * instruction.
1428 */
1429 /** @todo optimize this differently by not using PGMPhysRead. */
1430 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;
1431 pVCpu->iem.s.GCPhysOpcodes = GCPhys;
1432 if ( offPrevOpcodes < cbOldOpcodes
1433 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))
1434 {
1435 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;
1436 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));
1437 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);
1438 pVCpu->iem.s.cbOpcode = cbNew;
1439 return VINF_SUCCESS;
1440 }
1441# endif
1442
1443 /*
1444 * Read the bytes at this address.
1445 */
1446 PVM pVM = pVCpu->CTX_SUFF(pVM);
1447# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1448 size_t cbActual;
1449 if ( PATMIsEnabled(pVM)
1450 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1451 {
1452 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1453 Assert(cbActual > 0);
1454 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1455 }
1456 else
1457# endif
1458 {
1459 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1460 if (cbToTryRead > cbLeftOnPage)
1461 cbToTryRead = cbLeftOnPage;
1462 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1463 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1464
1465 if (!pVCpu->iem.s.fBypassHandlers)
1466 {
1467 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1468 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1469 { /* likely */ }
1470 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1471 {
1472 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1473 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1474 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1475 }
1476 else
1477 {
1478 Log((RT_SUCCESS(rcStrict)
1479 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1480 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1481 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1482 return rcStrict;
1483 }
1484 }
1485 else
1486 {
1487 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1488 if (RT_SUCCESS(rc))
1489 { /* likely */ }
1490 else
1491 {
1492 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1493 GCPtrPC, GCPhys, cbToTryRead, rc));
1494 return rc;
1495 }
1496 }
1497 pVCpu->iem.s.cbOpcode = cbToTryRead;
1498 }
1499#endif /* !IEM_WITH_CODE_TLB */
1500 return VINF_SUCCESS;
1501}
1502
1503
1504/**
1505 * Invalidates the IEM TLBs.
1506 *
1507 * This is called internally as well as by PGM when moving GC mappings.
1508 *
1509 *
1510 * @param pVCpu The cross context virtual CPU structure of the calling
1511 * thread.
1512 * @param fVmm Set when PGM calls us with a remapping.
1513 */
1514VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1515{
1516#ifdef IEM_WITH_CODE_TLB
1517 pVCpu->iem.s.cbInstrBufTotal = 0;
1518 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1519 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1520 { /* very likely */ }
1521 else
1522 {
1523 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1524 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1525 while (i-- > 0)
1526 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1527 }
1528#endif
1529
1530#ifdef IEM_WITH_DATA_TLB
1531 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1532 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1533 { /* very likely */ }
1534 else
1535 {
1536 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1537 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1538 while (i-- > 0)
1539 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1540 }
1541#endif
1542 NOREF(pVCpu); NOREF(fVmm);
1543}
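
#if 0 /* Illustrative sketch only (not built, assumes IEM_WITH_CODE_TLB): why bumping
         uTlbRevision above invalidates lazily.  The revision is part of every entry's
         tag, so after the increment a fresh lookup computes a tag value that a stale
         entry can no longer match and the lookup simply misses. */
IEM_STATIC bool iemTlbSketchIsCodeTlbHit(PVMCPU pVCpu, RTGCPTR GCPtr)
{
    uint64_t const uTag  = (GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
    PIEMTLBENTRY   pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
    return pTlbe->uTag == uTag; /* false for entries tagged with a previous revision */
}
#endif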
1544
1545
1546/**
1547 * Invalidates a page in the TLBs.
1548 *
1549 * @param pVCpu The cross context virtual CPU structure of the calling
1550 * thread.
1551 * @param GCPtr The address of the page to invalidate
1552 */
1553VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1554{
1555#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1556 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1557 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1558 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1559 uintptr_t idx = (uint8_t)GCPtr;
1560
1561# ifdef IEM_WITH_CODE_TLB
1562 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1563 {
1564 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1565 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1566 pVCpu->iem.s.cbInstrBufTotal = 0;
1567 }
1568# endif
1569
1570# ifdef IEM_WITH_DATA_TLB
1571 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1572 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1573# endif
1574#else
1575 NOREF(pVCpu); NOREF(GCPtr);
1576#endif
1577}
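
/* Usage sketch (illustrative, not taken from a caller in this file): after changing the
   mapping of a single guest page, flush just that page from both IEM TLBs.  Any address
   inside the page works, since only the bits above X86_PAGE_SHIFT are used:
       IEMTlbInvalidatePage(pVCpu, GCPtrPage);
*/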
1578
1579
1580/**
1581 * Invalidates the host physical aspects of the IEM TLBs.
1582 *
1583 * This is called internally as well as by PGM when moving GC mappings.
1584 *
1585 * @param pVCpu The cross context virtual CPU structure of the calling
1586 * thread.
1587 */
1588VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1589{
1590#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1591 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1592
1593# ifdef IEM_WITH_CODE_TLB
1594 pVCpu->iem.s.cbInstrBufTotal = 0;
1595# endif
1596 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1597 if (uTlbPhysRev != 0)
1598 {
1599 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1600 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1601 }
1602 else
1603 {
1604 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1605 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1606
1607 unsigned i;
1608# ifdef IEM_WITH_CODE_TLB
1609 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1610 while (i-- > 0)
1611 {
1612 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1613 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1614 }
1615# endif
1616# ifdef IEM_WITH_DATA_TLB
1617 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1618 while (i-- > 0)
1619 {
1620 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1621 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1622 }
1623# endif
1624 }
1625#else
1626 NOREF(pVCpu);
1627#endif
1628}
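
/* Consumption sketch (informal): users of the cached physical info must revalidate it
   against the current physical revision before touching pbMappingR3, as the code TLB
   fetch path further down does:
       if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) != pVCpu->iem.s.CodeTlb.uTlbPhysRev)
           // refresh pbMappingR3 and the PG_NO_* flags via PGMPhysIemGCPhys2PtrNoLock()
*/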
1629
1630
1631/**
1632 * Invalidates the host physical aspects of the IEM TLBs.
1633 *
1634 * This is called internally as well as by PGM when moving GC mappings.
1635 *
1636 * @param pVM The cross context VM structure.
1637 *
1638 * @remarks Caller holds the PGM lock.
1639 */
1640VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1641{
1642 RT_NOREF_PV(pVM);
1643}
1644
1645#ifdef IEM_WITH_CODE_TLB
1646
1647/**
1648 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1649 * failure and jumping.
1650 *
1651 * We end up here for a number of reasons:
1652 * - pbInstrBuf isn't yet initialized.
1653 * - Advancing beyond the buffer boundary (e.g. cross page).
1654 * - Advancing beyond the CS segment limit.
1655 * - Fetching from non-mappable page (e.g. MMIO).
1656 *
1657 * @param pVCpu The cross context virtual CPU structure of the
1658 * calling thread.
1659 * @param pvDst Where to return the bytes.
1660 * @param cbDst Number of bytes to read.
1661 *
1662 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1663 */
1664IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1665{
1666#ifdef IN_RING3
1667//__debugbreak();
1668 for (;;)
1669 {
1670 Assert(cbDst <= 8);
1671 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1672
1673 /*
1674 * We might have a partial buffer match, deal with that first to make the
1675 * rest simpler. This is the first part of the cross page/buffer case.
1676 */
1677 if (pVCpu->iem.s.pbInstrBuf != NULL)
1678 {
1679 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1680 {
1681 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1682 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1683 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1684
1685 cbDst -= cbCopy;
1686 pvDst = (uint8_t *)pvDst + cbCopy;
1687 offBuf += cbCopy;
1688 pVCpu->iem.s.offInstrNextByte += cbCopy;
1689 }
1690 }
1691
1692 /*
1693 * Check segment limit, figuring how much we're allowed to access at this point.
1694 *
1695 * We will fault immediately if RIP is past the segment limit / in non-canonical
1696 * territory. If we do continue, there are one or more bytes to read before we
1697 * end up in trouble and we need to do that first before faulting.
1698 */
1699 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1700 RTGCPTR GCPtrFirst;
1701 uint32_t cbMaxRead;
1702 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1703 {
1704 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1705 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1706 { /* likely */ }
1707 else
1708 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1709 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1710 }
1711 else
1712 {
1713 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1714 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1715 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1716 { /* likely */ }
1717 else
1718 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1719 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1720 if (cbMaxRead != 0)
1721 { /* likely */ }
1722 else
1723 {
1724 /* Overflowed because address is 0 and limit is max. */
1725 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1726 cbMaxRead = X86_PAGE_SIZE;
1727 }
1728 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1729 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1730 if (cbMaxRead2 < cbMaxRead)
1731 cbMaxRead = cbMaxRead2;
1732 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1733 }
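
 /* At this point cbMaxRead is how many bytes may legally be read starting at GCPtrFirst:
    it is capped by the CS limit (when applicable) and by the end of the current guest
    page, never by cbDst. */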
1734
1735 /*
1736 * Get the TLB entry for this piece of code.
1737 */
1738 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1739 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1740 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1741 if (pTlbe->uTag == uTag)
1742 {
1743 /* likely when executing lots of code, otherwise unlikely */
1744# ifdef VBOX_WITH_STATISTICS
1745 pVCpu->iem.s.CodeTlb.cTlbHits++;
1746# endif
1747 }
1748 else
1749 {
1750 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1751# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1752 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1753 {
1754 pTlbe->uTag = uTag;
1755 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1756 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1757 pTlbe->GCPhys = NIL_RTGCPHYS;
1758 pTlbe->pbMappingR3 = NULL;
1759 }
1760 else
1761# endif
1762 {
1763 RTGCPHYS GCPhys;
1764 uint64_t fFlags;
1765 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1766 if (RT_FAILURE(rc))
1767 {
1768 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1769 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1770 }
1771
1772 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1773 pTlbe->uTag = uTag;
1774 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1775 pTlbe->GCPhys = GCPhys;
1776 pTlbe->pbMappingR3 = NULL;
1777 }
1778 }
1779
1780 /*
1781 * Check TLB page table level access flags.
1782 */
1783 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1784 {
1785 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1786 {
1787 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1788 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1789 }
1790 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1791 {
1792 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1793 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1794 }
1795 }
1796
1797# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1798 /*
1799 * Allow interpretation of patch manager code blocks since they can for
1800 * instance throw #PFs for perfectly good reasons.
1801 */
1802 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1803 { /* likely */ }
1804 else
1805 {
1806 /** @todo Could optimize this a little in ring-3 if we liked. */
1807 size_t cbRead = 0;
1808 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1809 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1810 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1811 return;
1812 }
1813# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1814
1815 /*
1816 * Look up the physical page info if necessary.
1817 */
1818 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1819 { /* not necessary */ }
1820 else
1821 {
1822 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1823 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1824 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1825 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1826 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1827 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1828 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1829 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1830 }
1831
1832# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1833 /*
1834 * Try do a direct read using the pbMappingR3 pointer.
1835 */
1836 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1837 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1838 {
1839 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1840 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1841 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1842 {
1843 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1844 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1845 }
1846 else
1847 {
1848 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1849 Assert(cbInstr < cbMaxRead);
1850 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1851 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1852 }
1853 if (cbDst <= cbMaxRead)
1854 {
1855 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1856 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1857 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1858 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1859 return;
1860 }
1861 pVCpu->iem.s.pbInstrBuf = NULL;
1862
1863 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1864 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1865 }
1866 else
1867# endif
1868#if 0
1869 /*
1870 * If there is no special read handling, we can read a bit more and
1871 * put it in the prefetch buffer.
1872 */
1873 if ( cbDst < cbMaxRead
1874 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1875 {
1876 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1877 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1878 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1879 { /* likely */ }
1880 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1881 {
1882 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1883 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1884 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1885 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1886 }
1887 else
1888 {
1889 Log((RT_SUCCESS(rcStrict)
1890 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1891 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1892 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1893 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1894 }
1895 }
1896 /*
1897 * Special read handling, so only read exactly what's needed.
1898 * This is a highly unlikely scenario.
1899 */
1900 else
1901#endif
1902 {
1903 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1904 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1905 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1906 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1907 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1908 { /* likely */ }
1909 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1910 {
1911 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1912 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1913 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1914 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1915 }
1916 else
1917 {
1918 Log((RT_SUCCESS(rcStrict)
1919 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1920 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1921 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1922 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1923 }
1924 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1925 if (cbToRead == cbDst)
1926 return;
1927 }
1928
1929 /*
1930 * More to read, loop.
1931 */
1932 cbDst -= cbMaxRead;
1933 pvDst = (uint8_t *)pvDst + cbMaxRead;
1934 }
1935#else
1936 RT_NOREF(pvDst, cbDst);
1937 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1938#endif
1939}
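
/*
 * Worked example (informal): a 4 byte fetch with only 1 byte left in pbInstrBuf.  The
 * partial buffer match at the top of the loop copies that 1 byte, the segment and TLB
 * checks then run for the byte following the buffer, and the remaining 3 bytes are
 * either copied from the new pbMappingR3 (which also becomes the new instruction
 * buffer) or, on the slow path, read with PGMPhysRead before looping if needed.
 */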
1940
1941#else
1942
1943/**
1944 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1945 * exception if it fails.
1946 *
1947 * @returns Strict VBox status code.
1948 * @param pVCpu The cross context virtual CPU structure of the
1949 * calling thread.
1950 * @param cbMin The minimum number of bytes relative to offOpcode
1951 * that must be read.
1952 */
1953IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1954{
1955 /*
1956 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1957 *
1958 * First translate CS:rIP to a physical address.
1959 */
1960 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1961 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1962 uint32_t cbToTryRead;
1963 RTGCPTR GCPtrNext;
1964 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1965 {
1966 cbToTryRead = PAGE_SIZE;
1967 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1968 if (!IEM_IS_CANONICAL(GCPtrNext))
1969 return iemRaiseGeneralProtectionFault0(pVCpu);
1970 }
1971 else
1972 {
1973 uint32_t GCPtrNext32 = pCtx->eip;
1974 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1975 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1976 if (GCPtrNext32 > pCtx->cs.u32Limit)
1977 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1978 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1979 if (!cbToTryRead) /* overflowed */
1980 {
1981 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1982 cbToTryRead = UINT32_MAX;
1983 /** @todo check out wrapping around the code segment. */
1984 }
1985 if (cbToTryRead < cbMin - cbLeft)
1986 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1987 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1988 }
1989
1990 /* Only read up to the end of the page, and make sure we don't read more
1991 than the opcode buffer can hold. */
1992 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1993 if (cbToTryRead > cbLeftOnPage)
1994 cbToTryRead = cbLeftOnPage;
1995 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1996 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1997/** @todo r=bird: Convert assertion into undefined opcode exception? */
1998 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1999
2000# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2001 /* Allow interpretation of patch manager code blocks since they can for
2002 instance throw #PFs for perfectly good reasons. */
2003 if (pVCpu->iem.s.fInPatchCode)
2004 {
2005 size_t cbRead = 0;
2006 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2007 AssertRCReturn(rc, rc);
2008 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2009 return VINF_SUCCESS;
2010 }
2011# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2012
2013 RTGCPHYS GCPhys;
2014 uint64_t fFlags;
2015 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2016 if (RT_FAILURE(rc))
2017 {
2018 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2019 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2020 }
2021 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2022 {
2023 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2024 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2025 }
2026 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
2027 {
2028 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2029 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2030 }
2031 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2032 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2033 /** @todo Check reserved bits and such stuff. PGM is better at doing
2034 * that, so do it when implementing the guest virtual address
2035 * TLB... */
2036
2037 /*
2038 * Read the bytes at this address.
2039 *
2040 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2041 * and since PATM should only patch the start of an instruction there
2042 * should be no need to check again here.
2043 */
2044 if (!pVCpu->iem.s.fBypassHandlers)
2045 {
2046 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2047 cbToTryRead, PGMACCESSORIGIN_IEM);
2048 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2049 { /* likely */ }
2050 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2051 {
2052 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2053 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2054 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2055 }
2056 else
2057 {
2058 Log((RT_SUCCESS(rcStrict)
2059 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2060 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2061 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2062 return rcStrict;
2063 }
2064 }
2065 else
2066 {
2067 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2068 if (RT_SUCCESS(rc))
2069 { /* likely */ }
2070 else
2071 {
2072 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2073 return rc;
2074 }
2075 }
2076 pVCpu->iem.s.cbOpcode += cbToTryRead;
2077 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2078
2079 return VINF_SUCCESS;
2080}
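
/*
 * Note (informal summary): in this non-TLB configuration the decoder state is simply
 * abOpcode[] holding cbOpcode valid bytes, of which offOpcode have been consumed; the
 * function above tops the buffer up so at least cbMin unconsumed bytes are available.
 */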
2081
2082#endif /* !IEM_WITH_CODE_TLB */
2083#ifndef IEM_WITH_SETJMP
2084
2085/**
2086 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2087 *
2088 * @returns Strict VBox status code.
2089 * @param pVCpu The cross context virtual CPU structure of the
2090 * calling thread.
2091 * @param pb Where to return the opcode byte.
2092 */
2093DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2094{
2095 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2096 if (rcStrict == VINF_SUCCESS)
2097 {
2098 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2099 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2100 pVCpu->iem.s.offOpcode = offOpcode + 1;
2101 }
2102 else
2103 *pb = 0;
2104 return rcStrict;
2105}
2106
2107
2108/**
2109 * Fetches the next opcode byte.
2110 *
2111 * @returns Strict VBox status code.
2112 * @param pVCpu The cross context virtual CPU structure of the
2113 * calling thread.
2114 * @param pu8 Where to return the opcode byte.
2115 */
2116DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2117{
2118 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2119 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2120 {
2121 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2122 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2123 return VINF_SUCCESS;
2124 }
2125 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2126}
2127
2128#else /* IEM_WITH_SETJMP */
2129
2130/**
2131 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2132 *
2133 * @returns The opcode byte.
2134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2135 */
2136DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2137{
2138# ifdef IEM_WITH_CODE_TLB
2139 uint8_t u8;
2140 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2141 return u8;
2142# else
2143 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2144 if (rcStrict == VINF_SUCCESS)
2145 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2146 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2147# endif
2148}
2149
2150
2151/**
2152 * Fetches the next opcode byte, longjmp on error.
2153 *
2154 * @returns The opcode byte.
2155 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2156 */
2157DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2158{
2159# ifdef IEM_WITH_CODE_TLB
2160 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2161 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2162 if (RT_LIKELY( pbBuf != NULL
2163 && offBuf < pVCpu->iem.s.cbInstrBuf))
2164 {
2165 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2166 return pbBuf[offBuf];
2167 }
2168# else
2169 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2170 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2171 {
2172 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2173 return pVCpu->iem.s.abOpcode[offOpcode];
2174 }
2175# endif
2176 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2177}
2178
2179#endif /* IEM_WITH_SETJMP */
2180
2181/**
2182 * Fetches the next opcode byte, returns automatically on failure.
2183 *
2184 * @param a_pu8 Where to return the opcode byte.
2185 * @remark Implicitly references pVCpu.
2186 */
2187#ifndef IEM_WITH_SETJMP
2188# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2189 do \
2190 { \
2191 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2192 if (rcStrict2 == VINF_SUCCESS) \
2193 { /* likely */ } \
2194 else \
2195 return rcStrict2; \
2196 } while (0)
2197#else
2198# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2199#endif /* IEM_WITH_SETJMP */
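
#if 0 /* Usage sketch (hypothetical decoder fragment, not part of this file): the macro
         hides the setjmp vs. status-code difference, so decoder bodies read the same in
         either build.  The function name below is made up for illustration. */
IEM_STATIC VBOXSTRICTRC iemOpSketch_ExampleDecoder(PVMCPU pVCpu)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_U8(&bRm);   /* returns (or longjmps) on a failed opcode fetch */
    RT_NOREF(bRm);
    return VINF_SUCCESS;
}
#endif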
2200
2201
2202#ifndef IEM_WITH_SETJMP
2203/**
2204 * Fetches the next signed byte from the opcode stream.
2205 *
2206 * @returns Strict VBox status code.
2207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2208 * @param pi8 Where to return the signed byte.
2209 */
2210DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2211{
2212 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2213}
2214#endif /* !IEM_WITH_SETJMP */
2215
2216
2217/**
2218 * Fetches the next signed byte from the opcode stream, returning automatically
2219 * on failure.
2220 *
2221 * @param a_pi8 Where to return the signed byte.
2222 * @remark Implicitly references pVCpu.
2223 */
2224#ifndef IEM_WITH_SETJMP
2225# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2226 do \
2227 { \
2228 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2229 if (rcStrict2 != VINF_SUCCESS) \
2230 return rcStrict2; \
2231 } while (0)
2232#else /* IEM_WITH_SETJMP */
2233# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2234
2235#endif /* IEM_WITH_SETJMP */
2236
2237#ifndef IEM_WITH_SETJMP
2238
2239/**
2240 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2241 *
2242 * @returns Strict VBox status code.
2243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2244 * @param pu16 Where to return the unsigned word.
2245 */
2246DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2247{
2248 uint8_t u8;
2249 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2250 if (rcStrict == VINF_SUCCESS)
2251 *pu16 = (int8_t)u8;
2252 return rcStrict;
2253}
2254
2255
2256/**
2257 * Fetches the next signed byte from the opcode stream, extending it to
2258 * unsigned 16-bit.
2259 *
2260 * @returns Strict VBox status code.
2261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2262 * @param pu16 Where to return the unsigned word.
2263 */
2264DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2265{
2266 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2267 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2268 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2269
2270 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2271 pVCpu->iem.s.offOpcode = offOpcode + 1;
2272 return VINF_SUCCESS;
2273}
2274
2275#endif /* !IEM_WITH_SETJMP */
2276
2277/**
2278 * Fetches the next signed byte from the opcode stream, sign-extending it to
2279 * a word and returning automatically on failure.
2280 *
2281 * @param a_pu16 Where to return the word.
2282 * @remark Implicitly references pVCpu.
2283 */
2284#ifndef IEM_WITH_SETJMP
2285# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2286 do \
2287 { \
2288 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2289 if (rcStrict2 != VINF_SUCCESS) \
2290 return rcStrict2; \
2291 } while (0)
2292#else
2293# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2294#endif
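
/* Example (informal): a displacement byte of 0xFE (i.e. -2) fetched via
   IEM_OPCODE_GET_NEXT_S8_SX_U16() yields 0xFFFE in the 16-bit destination. */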
2295
2296#ifndef IEM_WITH_SETJMP
2297
2298/**
2299 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2300 *
2301 * @returns Strict VBox status code.
2302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2303 * @param pu32 Where to return the opcode dword.
2304 */
2305DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2306{
2307 uint8_t u8;
2308 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2309 if (rcStrict == VINF_SUCCESS)
2310 *pu32 = (int8_t)u8;
2311 return rcStrict;
2312}
2313
2314
2315/**
2316 * Fetches the next signed byte from the opcode stream, extending it to
2317 * unsigned 32-bit.
2318 *
2319 * @returns Strict VBox status code.
2320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2321 * @param pu32 Where to return the unsigned dword.
2322 */
2323DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2324{
2325 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2326 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2327 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2328
2329 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2330 pVCpu->iem.s.offOpcode = offOpcode + 1;
2331 return VINF_SUCCESS;
2332}
2333
2334#endif /* !IEM_WITH_SETJMP */
2335
2336/**
2337 * Fetches the next signed byte from the opcode stream, sign-extending it to
2338 * a double word and returning automatically on failure.
2339 *
2340 * @param a_pu32 Where to return the double word.
2341 * @remark Implicitly references pVCpu.
2342 */
2343#ifndef IEM_WITH_SETJMP
2344#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2345 do \
2346 { \
2347 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2348 if (rcStrict2 != VINF_SUCCESS) \
2349 return rcStrict2; \
2350 } while (0)
2351#else
2352# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2353#endif
2354
2355#ifndef IEM_WITH_SETJMP
2356
2357/**
2358 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2359 *
2360 * @returns Strict VBox status code.
2361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2362 * @param pu64 Where to return the opcode qword.
2363 */
2364DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2365{
2366 uint8_t u8;
2367 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2368 if (rcStrict == VINF_SUCCESS)
2369 *pu64 = (int8_t)u8;
2370 return rcStrict;
2371}
2372
2373
2374/**
2375 * Fetches the next signed byte from the opcode stream, extending it to
2376 * unsigned 64-bit.
2377 *
2378 * @returns Strict VBox status code.
2379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2380 * @param pu64 Where to return the unsigned qword.
2381 */
2382DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2383{
2384 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2385 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2386 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2387
2388 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2389 pVCpu->iem.s.offOpcode = offOpcode + 1;
2390 return VINF_SUCCESS;
2391}
2392
2393#endif /* !IEM_WITH_SETJMP */
2394
2395
2396/**
2397 * Fetches the next signed byte from the opcode stream, sign-extending it to
2398 * a quad word and returning automatically on failure.
2399 *
2400 * @param a_pu64 Where to return the quad word.
2401 * @remark Implicitly references pVCpu.
2402 */
2403#ifndef IEM_WITH_SETJMP
2404# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2405 do \
2406 { \
2407 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2408 if (rcStrict2 != VINF_SUCCESS) \
2409 return rcStrict2; \
2410 } while (0)
2411#else
2412# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2413#endif
2414
2415
2416#ifndef IEM_WITH_SETJMP
2417
2418/**
2419 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2420 *
2421 * @returns Strict VBox status code.
2422 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2423 * @param pu16 Where to return the opcode word.
2424 */
2425DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2426{
2427 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2428 if (rcStrict == VINF_SUCCESS)
2429 {
2430 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2431# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2432 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2433# else
2434 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2435# endif
2436 pVCpu->iem.s.offOpcode = offOpcode + 2;
2437 }
2438 else
2439 *pu16 = 0;
2440 return rcStrict;
2441}
2442
2443
2444/**
2445 * Fetches the next opcode word.
2446 *
2447 * @returns Strict VBox status code.
2448 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2449 * @param pu16 Where to return the opcode word.
2450 */
2451DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2452{
2453 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2454 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2455 {
2456 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2457# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2458 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2459# else
2460 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2461# endif
2462 return VINF_SUCCESS;
2463 }
2464 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2465}
2466
2467#else /* IEM_WITH_SETJMP */
2468
2469/**
2470 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2471 *
2472 * @returns The opcode word.
2473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2474 */
2475DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2476{
2477# ifdef IEM_WITH_CODE_TLB
2478 uint16_t u16;
2479 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2480 return u16;
2481# else
2482 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2483 if (rcStrict == VINF_SUCCESS)
2484 {
2485 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2486 pVCpu->iem.s.offOpcode += 2;
2487# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2488 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2489# else
2490 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2491# endif
2492 }
2493 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2494# endif
2495}
2496
2497
2498/**
2499 * Fetches the next opcode word, longjmp on error.
2500 *
2501 * @returns The opcode word.
2502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2503 */
2504DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2505{
2506# ifdef IEM_WITH_CODE_TLB
2507 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2508 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2509 if (RT_LIKELY( pbBuf != NULL
2510 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2511 {
2512 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2513# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2514 return *(uint16_t const *)&pbBuf[offBuf];
2515# else
2516 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2517# endif
2518 }
2519# else
2520 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2521 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2522 {
2523 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2524# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2525 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2526# else
2527 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2528# endif
2529 }
2530# endif
2531 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2532}
2533
2534#endif /* IEM_WITH_SETJMP */
2535
2536
2537/**
2538 * Fetches the next opcode word, returns automatically on failure.
2539 *
2540 * @param a_pu16 Where to return the opcode word.
2541 * @remark Implicitly references pVCpu.
2542 */
2543#ifndef IEM_WITH_SETJMP
2544# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2545 do \
2546 { \
2547 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2548 if (rcStrict2 != VINF_SUCCESS) \
2549 return rcStrict2; \
2550 } while (0)
2551#else
2552# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2553#endif
2554
2555#ifndef IEM_WITH_SETJMP
2556
2557/**
2558 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2559 *
2560 * @returns Strict VBox status code.
2561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2562 * @param pu32 Where to return the opcode double word.
2563 */
2564DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2565{
2566 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2567 if (rcStrict == VINF_SUCCESS)
2568 {
2569 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2570 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2571 pVCpu->iem.s.offOpcode = offOpcode + 2;
2572 }
2573 else
2574 *pu32 = 0;
2575 return rcStrict;
2576}
2577
2578
2579/**
2580 * Fetches the next opcode word, zero extending it to a double word.
2581 *
2582 * @returns Strict VBox status code.
2583 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2584 * @param pu32 Where to return the opcode double word.
2585 */
2586DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2587{
2588 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2589 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2590 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2591
2592 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2593 pVCpu->iem.s.offOpcode = offOpcode + 2;
2594 return VINF_SUCCESS;
2595}
2596
2597#endif /* !IEM_WITH_SETJMP */
2598
2599
2600/**
2601 * Fetches the next opcode word and zero extends it to a double word, returns
2602 * automatically on failure.
2603 *
2604 * @param a_pu32 Where to return the opcode double word.
2605 * @remark Implicitly references pVCpu.
2606 */
2607#ifndef IEM_WITH_SETJMP
2608# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2609 do \
2610 { \
2611 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2612 if (rcStrict2 != VINF_SUCCESS) \
2613 return rcStrict2; \
2614 } while (0)
2615#else
2616# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2617#endif
2618
2619#ifndef IEM_WITH_SETJMP
2620
2621/**
2622 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2623 *
2624 * @returns Strict VBox status code.
2625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2626 * @param pu64 Where to return the opcode quad word.
2627 */
2628DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2629{
2630 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2631 if (rcStrict == VINF_SUCCESS)
2632 {
2633 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2634 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2635 pVCpu->iem.s.offOpcode = offOpcode + 2;
2636 }
2637 else
2638 *pu64 = 0;
2639 return rcStrict;
2640}
2641
2642
2643/**
2644 * Fetches the next opcode word, zero extending it to a quad word.
2645 *
2646 * @returns Strict VBox status code.
2647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2648 * @param pu64 Where to return the opcode quad word.
2649 */
2650DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2651{
2652 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2653 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2654 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2655
2656 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2657 pVCpu->iem.s.offOpcode = offOpcode + 2;
2658 return VINF_SUCCESS;
2659}
2660
2661#endif /* !IEM_WITH_SETJMP */
2662
2663/**
2664 * Fetches the next opcode word and zero extends it to a quad word, returns
2665 * automatically on failure.
2666 *
2667 * @param a_pu64 Where to return the opcode quad word.
2668 * @remark Implicitly references pVCpu.
2669 */
2670#ifndef IEM_WITH_SETJMP
2671# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2672 do \
2673 { \
2674 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2675 if (rcStrict2 != VINF_SUCCESS) \
2676 return rcStrict2; \
2677 } while (0)
2678#else
2679# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2680#endif
2681
2682
2683#ifndef IEM_WITH_SETJMP
2684/**
2685 * Fetches the next signed word from the opcode stream.
2686 *
2687 * @returns Strict VBox status code.
2688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2689 * @param pi16 Where to return the signed word.
2690 */
2691DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2692{
2693 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2694}
2695#endif /* !IEM_WITH_SETJMP */
2696
2697
2698/**
2699 * Fetches the next signed word from the opcode stream, returning automatically
2700 * on failure.
2701 *
2702 * @param a_pi16 Where to return the signed word.
2703 * @remark Implicitly references pVCpu.
2704 */
2705#ifndef IEM_WITH_SETJMP
2706# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2707 do \
2708 { \
2709 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2710 if (rcStrict2 != VINF_SUCCESS) \
2711 return rcStrict2; \
2712 } while (0)
2713#else
2714# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2715#endif
2716
2717#ifndef IEM_WITH_SETJMP
2718
2719/**
2720 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2721 *
2722 * @returns Strict VBox status code.
2723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2724 * @param pu32 Where to return the opcode dword.
2725 */
2726DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2727{
2728 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2729 if (rcStrict == VINF_SUCCESS)
2730 {
2731 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2732# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2733 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2734# else
2735 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2736 pVCpu->iem.s.abOpcode[offOpcode + 1],
2737 pVCpu->iem.s.abOpcode[offOpcode + 2],
2738 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2739# endif
2740 pVCpu->iem.s.offOpcode = offOpcode + 4;
2741 }
2742 else
2743 *pu32 = 0;
2744 return rcStrict;
2745}
2746
2747
2748/**
2749 * Fetches the next opcode dword.
2750 *
2751 * @returns Strict VBox status code.
2752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2753 * @param pu32 Where to return the opcode double word.
2754 */
2755DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2756{
2757 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2758 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2759 {
2760 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2761# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2762 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2763# else
2764 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2765 pVCpu->iem.s.abOpcode[offOpcode + 1],
2766 pVCpu->iem.s.abOpcode[offOpcode + 2],
2767 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2768# endif
2769 return VINF_SUCCESS;
2770 }
2771 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2772}
2773
2774#else /* IEM_WITH_SETJMP */
2775
2776/**
2777 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2778 *
2779 * @returns The opcode dword.
2780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2781 */
2782DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2783{
2784# ifdef IEM_WITH_CODE_TLB
2785 uint32_t u32;
2786 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2787 return u32;
2788# else
2789 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2790 if (rcStrict == VINF_SUCCESS)
2791 {
2792 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2793 pVCpu->iem.s.offOpcode = offOpcode + 4;
2794# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2795 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2796# else
2797 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2798 pVCpu->iem.s.abOpcode[offOpcode + 1],
2799 pVCpu->iem.s.abOpcode[offOpcode + 2],
2800 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2801# endif
2802 }
2803 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2804# endif
2805}
2806
2807
2808/**
2809 * Fetches the next opcode dword, longjmp on error.
2810 *
2811 * @returns The opcode dword.
2812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2813 */
2814DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2815{
2816# ifdef IEM_WITH_CODE_TLB
2817 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2818 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2819 if (RT_LIKELY( pbBuf != NULL
2820 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2821 {
2822 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2823# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2824 return *(uint32_t const *)&pbBuf[offBuf];
2825# else
2826 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2827 pbBuf[offBuf + 1],
2828 pbBuf[offBuf + 2],
2829 pbBuf[offBuf + 3]);
2830# endif
2831 }
2832# else
2833 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2834 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2835 {
2836 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2837# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2838 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2839# else
2840 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2841 pVCpu->iem.s.abOpcode[offOpcode + 1],
2842 pVCpu->iem.s.abOpcode[offOpcode + 2],
2843 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2844# endif
2845 }
2846# endif
2847 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2848}
2849
2850#endif /* IEM_WITH_SETJMP */
2851
2852
2853/**
2854 * Fetches the next opcode dword, returns automatically on failure.
2855 *
2856 * @param a_pu32 Where to return the opcode dword.
2857 * @remark Implicitly references pVCpu.
2858 */
2859#ifndef IEM_WITH_SETJMP
2860# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2861 do \
2862 { \
2863 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2864 if (rcStrict2 != VINF_SUCCESS) \
2865 return rcStrict2; \
2866 } while (0)
2867#else
2868# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2869#endif
2870
2871#ifndef IEM_WITH_SETJMP
2872
2873/**
2874 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2875 *
2876 * @returns Strict VBox status code.
2877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2878 * @param pu64 Where to return the opcode dword.
2879 */
2880DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2881{
2882 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2883 if (rcStrict == VINF_SUCCESS)
2884 {
2885 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2886 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2887 pVCpu->iem.s.abOpcode[offOpcode + 1],
2888 pVCpu->iem.s.abOpcode[offOpcode + 2],
2889 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2890 pVCpu->iem.s.offOpcode = offOpcode + 4;
2891 }
2892 else
2893 *pu64 = 0;
2894 return rcStrict;
2895}
2896
2897
2898/**
2899 * Fetches the next opcode dword, zero extending it to a quad word.
2900 *
2901 * @returns Strict VBox status code.
2902 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2903 * @param pu64 Where to return the opcode quad word.
2904 */
2905DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2906{
2907 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2908 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2909 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2910
2911 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2912 pVCpu->iem.s.abOpcode[offOpcode + 1],
2913 pVCpu->iem.s.abOpcode[offOpcode + 2],
2914 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2915 pVCpu->iem.s.offOpcode = offOpcode + 4;
2916 return VINF_SUCCESS;
2917}
2918
2919#endif /* !IEM_WITH_SETJMP */
2920
2921
2922/**
2923 * Fetches the next opcode dword and zero extends it to a quad word, returns
2924 * automatically on failure.
2925 *
2926 * @param a_pu64 Where to return the opcode quad word.
2927 * @remark Implicitly references pVCpu.
2928 */
2929#ifndef IEM_WITH_SETJMP
2930# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2931 do \
2932 { \
2933 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2934 if (rcStrict2 != VINF_SUCCESS) \
2935 return rcStrict2; \
2936 } while (0)
2937#else
2938# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2939#endif
2940
2941
2942#ifndef IEM_WITH_SETJMP
2943/**
2944 * Fetches the next signed double word from the opcode stream.
2945 *
2946 * @returns Strict VBox status code.
2947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2948 * @param pi32 Where to return the signed double word.
2949 */
2950DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2951{
2952 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2953}
2954#endif
2955
2956/**
2957 * Fetches the next signed double word from the opcode stream, returning
2958 * automatically on failure.
2959 *
2960 * @param a_pi32 Where to return the signed double word.
2961 * @remark Implicitly references pVCpu.
2962 */
2963#ifndef IEM_WITH_SETJMP
2964# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2965 do \
2966 { \
2967 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2968 if (rcStrict2 != VINF_SUCCESS) \
2969 return rcStrict2; \
2970 } while (0)
2971#else
2972# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2973#endif
2974
2975#ifndef IEM_WITH_SETJMP
2976
2977/**
2978 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2979 *
2980 * @returns Strict VBox status code.
2981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2982 * @param pu64 Where to return the opcode qword.
2983 */
2984DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2985{
2986 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2987 if (rcStrict == VINF_SUCCESS)
2988 {
2989 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2990 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2991 pVCpu->iem.s.abOpcode[offOpcode + 1],
2992 pVCpu->iem.s.abOpcode[offOpcode + 2],
2993 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2994 pVCpu->iem.s.offOpcode = offOpcode + 4;
2995 }
2996 else
2997 *pu64 = 0;
2998 return rcStrict;
2999}
3000
3001
3002/**
3003 * Fetches the next opcode dword, sign extending it into a quad word.
3004 *
3005 * @returns Strict VBox status code.
3006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3007 * @param pu64 Where to return the opcode quad word.
3008 */
3009DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3010{
3011 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3012 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3013 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3014
3015 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3016 pVCpu->iem.s.abOpcode[offOpcode + 1],
3017 pVCpu->iem.s.abOpcode[offOpcode + 2],
3018 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3019 *pu64 = i32;
3020 pVCpu->iem.s.offOpcode = offOpcode + 4;
3021 return VINF_SUCCESS;
3022}
3023
3024#endif /* !IEM_WITH_SETJMP */
3025
3026
3027/**
3028 * Fetches the next opcode double word and sign extends it to a quad word,
3029 * returns automatically on failure.
3030 *
3031 * @param a_pu64 Where to return the opcode quad word.
3032 * @remark Implicitly references pVCpu.
3033 */
3034#ifndef IEM_WITH_SETJMP
3035# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3036 do \
3037 { \
3038 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3039 if (rcStrict2 != VINF_SUCCESS) \
3040 return rcStrict2; \
3041 } while (0)
3042#else
3043# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3044#endif
3045
3046#ifndef IEM_WITH_SETJMP
3047
3048/**
3049 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3050 *
3051 * @returns Strict VBox status code.
3052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3053 * @param pu64 Where to return the opcode qword.
3054 */
3055DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3056{
3057 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3058 if (rcStrict == VINF_SUCCESS)
3059 {
3060 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3061# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3062 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3063# else
3064 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3065 pVCpu->iem.s.abOpcode[offOpcode + 1],
3066 pVCpu->iem.s.abOpcode[offOpcode + 2],
3067 pVCpu->iem.s.abOpcode[offOpcode + 3],
3068 pVCpu->iem.s.abOpcode[offOpcode + 4],
3069 pVCpu->iem.s.abOpcode[offOpcode + 5],
3070 pVCpu->iem.s.abOpcode[offOpcode + 6],
3071 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3072# endif
3073 pVCpu->iem.s.offOpcode = offOpcode + 8;
3074 }
3075 else
3076 *pu64 = 0;
3077 return rcStrict;
3078}
3079
3080
3081/**
3082 * Fetches the next opcode qword.
3083 *
3084 * @returns Strict VBox status code.
3085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3086 * @param pu64 Where to return the opcode qword.
3087 */
3088DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3089{
3090 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3091 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3092 {
3093# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3094 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3095# else
3096 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3097 pVCpu->iem.s.abOpcode[offOpcode + 1],
3098 pVCpu->iem.s.abOpcode[offOpcode + 2],
3099 pVCpu->iem.s.abOpcode[offOpcode + 3],
3100 pVCpu->iem.s.abOpcode[offOpcode + 4],
3101 pVCpu->iem.s.abOpcode[offOpcode + 5],
3102 pVCpu->iem.s.abOpcode[offOpcode + 6],
3103 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3104# endif
3105 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3106 return VINF_SUCCESS;
3107 }
3108 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3109}
3110
3111#else /* IEM_WITH_SETJMP */
3112
3113/**
3114 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3115 *
3116 * @returns The opcode qword.
3117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3118 */
3119DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3120{
3121# ifdef IEM_WITH_CODE_TLB
3122 uint64_t u64;
3123 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3124 return u64;
3125# else
3126 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3127 if (rcStrict == VINF_SUCCESS)
3128 {
3129 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3130 pVCpu->iem.s.offOpcode = offOpcode + 8;
3131# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3132 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3133# else
3134 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3135 pVCpu->iem.s.abOpcode[offOpcode + 1],
3136 pVCpu->iem.s.abOpcode[offOpcode + 2],
3137 pVCpu->iem.s.abOpcode[offOpcode + 3],
3138 pVCpu->iem.s.abOpcode[offOpcode + 4],
3139 pVCpu->iem.s.abOpcode[offOpcode + 5],
3140 pVCpu->iem.s.abOpcode[offOpcode + 6],
3141 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3142# endif
3143 }
3144 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3145# endif
3146}
3147
3148
3149/**
3150 * Fetches the next opcode qword, longjmp on error.
3151 *
3152 * @returns The opcode qword.
3153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3154 */
3155DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3156{
3157# ifdef IEM_WITH_CODE_TLB
3158 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3159 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3160 if (RT_LIKELY( pbBuf != NULL
3161 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3162 {
3163 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3164# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3165 return *(uint64_t const *)&pbBuf[offBuf];
3166# else
3167 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3168 pbBuf[offBuf + 1],
3169 pbBuf[offBuf + 2],
3170 pbBuf[offBuf + 3],
3171 pbBuf[offBuf + 4],
3172 pbBuf[offBuf + 5],
3173 pbBuf[offBuf + 6],
3174 pbBuf[offBuf + 7]);
3175# endif
3176 }
3177# else
3178 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3179 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3180 {
3181 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3182# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3183 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3184# else
3185 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3186 pVCpu->iem.s.abOpcode[offOpcode + 1],
3187 pVCpu->iem.s.abOpcode[offOpcode + 2],
3188 pVCpu->iem.s.abOpcode[offOpcode + 3],
3189 pVCpu->iem.s.abOpcode[offOpcode + 4],
3190 pVCpu->iem.s.abOpcode[offOpcode + 5],
3191 pVCpu->iem.s.abOpcode[offOpcode + 6],
3192 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3193# endif
3194 }
3195# endif
3196 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3197}
3198
3199#endif /* IEM_WITH_SETJMP */
3200
3201/**
3202 * Fetches the next opcode quad word, returns automatically on failure.
3203 *
3204 * @param a_pu64 Where to return the opcode quad word.
3205 * @remark Implicitly references pVCpu.
3206 */
3207#ifndef IEM_WITH_SETJMP
3208# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3209 do \
3210 { \
3211 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3212 if (rcStrict2 != VINF_SUCCESS) \
3213 return rcStrict2; \
3214 } while (0)
3215#else
3216# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3217#endif
3218
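/*
 * Illustrative only: a minimal sketch (not part of the real decoder tables) of
 * how the opcode fetch macros above are meant to be used.  They implicitly
 * reference pVCpu and, in the non-setjmp build, return the strict status code
 * to the caller on failure, so they only work inside a function returning
 * VBOXSTRICTRC.  The function name below is hypothetical.
 *
 *      IEM_STATIC VBOXSTRICTRC iemExampleFetchImmediates(PVMCPU pVCpu)
 *      {
 *          uint64_t u64Imm;
 *          IEM_OPCODE_GET_NEXT_U64(&u64Imm);           // full 64-bit immediate
 *          uint64_t u64Disp;
 *          IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Disp);   // 32-bit, sign-extended to 64 bits
 *          return VINF_SUCCESS;
 *      }
 */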
3219
3220/** @name Misc Worker Functions.
3221 * @{
3222 */
3223
3224/**
3225 * Gets the exception class for the specified exception vector.
3226 *
3227 * @returns The class of the specified exception.
3228 * @param uVector The exception vector.
3229 */
3230IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3231{
3232 Assert(uVector <= X86_XCPT_LAST);
3233 switch (uVector)
3234 {
3235 case X86_XCPT_DE:
3236 case X86_XCPT_TS:
3237 case X86_XCPT_NP:
3238 case X86_XCPT_SS:
3239 case X86_XCPT_GP:
3240 case X86_XCPT_SX: /* AMD only */
3241 return IEMXCPTCLASS_CONTRIBUTORY;
3242
3243 case X86_XCPT_PF:
3244 case X86_XCPT_VE: /* Intel only */
3245 return IEMXCPTCLASS_PAGE_FAULT;
3246 }
3247 return IEMXCPTCLASS_BENIGN;
3248}
3249
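/*
 * Worked examples for the classification above (not executed): #GP, #TS, #NP
 * and #SS are contributory, #PF (and #VE on Intel) form the page-fault class,
 * while vectors not listed, e.g. #UD or #NM, fall through to
 * IEMXCPTCLASS_BENIGN.
 */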
3250
3251/**
3252 * Evaluates how to handle an exception caused during delivery of another event
3253 * (exception / interrupt).
3254 *
3255 * @returns How to handle the recursive exception.
3256 * @param pVCpu The cross context virtual CPU structure of the
3257 * calling thread.
3258 * @param fPrevFlags The flags of the previous event.
3259 * @param uPrevVector The vector of the previous event.
3260 * @param fCurFlags The flags of the current exception.
3261 * @param uCurVector The vector of the current exception.
3262 * @param pfXcptRaiseInfo Where to store additional information about the
3263 * exception condition. Optional.
3264 */
3265VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3266 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3267{
3268 /*
3269 * Only CPU exceptions can be raised while delivering other events; software interrupt
3270 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3271 */
3272 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3273 Assert(pVCpu);
3274
3275 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3276 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3277 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3278 {
3279 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3280 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3281 {
3282 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3283 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3284 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3285 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3286 {
3287 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3288 if (enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT)
3289 fRaiseInfo = IEMXCPTRAISEINFO_PF_PF;
3290 else
3291 fRaiseInfo = IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3292 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3293 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3294 }
3295 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3296 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3297 {
3298 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3299 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%u uCurVector=%u -> #DF\n", uPrevVector, uCurVector));
3300 }
3301 else if ( uPrevVector == X86_XCPT_DF
3302 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3303 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3304 {
3305 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3306 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3307 }
3308 }
3309 else
3310 {
3311 if ( uPrevVector == X86_XCPT_NMI
3312 && uCurVector == X86_XCPT_PF)
3313 {
3314 fRaiseInfo = IEMXCPTRAISEINFO_NMI_PF;
3315 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3316 }
3317 else if ( uPrevVector == X86_XCPT_AC
3318 && uCurVector == X86_XCPT_AC)
3319 {
3320 enmRaise = IEMXCPTRAISE_CPU_HANG;
3321 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3322 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3323 }
3324 }
3325 }
3326 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3327 {
3328 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3329 if (uCurVector == X86_XCPT_PF)
3330 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3331 }
3332 else
3333 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3334
3335 if (pfXcptRaiseInfo)
3336 *pfXcptRaiseInfo = fRaiseInfo;
3337 return enmRaise;
3338}
3339
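/*
 * Illustrative only: a sketch of how a caller (e.g. HM reflecting an exception
 * that occurred while injecting another one) might query the helper above.  A
 * #GP raised during delivery of a #PF is a contributory exception following a
 * page fault and thus folds into a double fault:
 *
 *      IEMXCPTRAISEINFO fRaiseInfo;
 *      IEMXCPTRAISE     enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                           &fRaiseInfo);
 *      Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 *      Assert(fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
 */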
3340
3341/**
3342 * Enters the CPU shutdown state initiated by a triple fault or other
3343 * unrecoverable conditions.
3344 *
3345 * @returns Strict VBox status code.
3346 * @param pVCpu The cross context virtual CPU structure of the
3347 * calling thread.
3348 */
3349IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3350{
3351 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3352 {
3353 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3354 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3355 }
3356
3357 RT_NOREF_PV(pVCpu);
3358 return VINF_EM_TRIPLE_FAULT;
3359}
3360
3361
3362#ifdef VBOX_WITH_NESTED_HWVIRT
3363IEM_STATIC VBOXSTRICTRC iemHandleSvmNstGstEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
3364 uint32_t uErr, uint64_t uCr2)
3365{
3366 Assert(IEM_IS_SVM_ENABLED(pVCpu));
3367
3368 /*
3369 * Handle nested-guest SVM exception and software interrupt intercepts,
3370 * see AMD spec. 15.12 "Exception Intercepts".
3371 *
3372 * - NMI intercepts have their own exit code and do not cause SVM_EXIT_EXCEPTION_2 #VMEXITs.
3373 * - External interrupts and software interrupts (INTn instruction) do not check the exception intercepts
3374 * even when they use a vector in the range 0 to 31.
3375 * - ICEBP should not trigger #DB intercept, but its own intercept.
3376 * - For #PF exceptions, its intercept is checked before CR2 is written by the exception.
3377 */
3378 /* Check NMI intercept */
3379 if ( u8Vector == X86_XCPT_NMI
3380 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3381 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_NMI))
3382 {
3383 Log2(("iemHandleSvmNstGstEventIntercept: NMI intercept -> #VMEXIT\n"));
3384 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3385 }
3386
3387 /* Check ICEBP intercept. */
3388 if ( (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)
3389 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_ICEBP))
3390 {
3391 Log2(("iemHandleSvmNstGstEventIntercept: ICEBP intercept -> #VMEXIT\n"));
3392 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_ICEBP, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3393 }
3394
3395 /* Check CPU exception intercepts. */
3396 if ( (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3397 && IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, u8Vector))
3398 {
3399 Assert(u8Vector <= X86_XCPT_LAST);
3400 uint64_t const uExitInfo1 = fFlags & IEM_XCPT_FLAGS_ERR ? uErr : 0;
3401 uint64_t const uExitInfo2 = fFlags & IEM_XCPT_FLAGS_CR2 ? uCr2 : 0;
3402 if ( IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist
3403 && u8Vector == X86_XCPT_PF
3404 && !(uErr & X86_TRAP_PF_ID))
3405 {
3406 /** @todo Nested-guest SVM - figure out fetching op-code bytes from IEM. */
3407#ifdef IEM_WITH_CODE_TLB
3408#else
3409 uint8_t const offOpCode = pVCpu->iem.s.offOpcode;
3410 uint8_t const cbCurrent = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode;
3411 if ( cbCurrent > 0
3412 && cbCurrent < sizeof(pCtx->hwvirt.svm.VmcbCtrl.abInstr))
3413 {
3414 Assert(cbCurrent <= sizeof(pVCpu->iem.s.abOpcode));
3415 memcpy(&pCtx->hwvirt.svm.VmcbCtrl.abInstr[0], &pVCpu->iem.s.abOpcode[offOpCode], cbCurrent);
3416 }
3417#endif
3418 }
3419 Log2(("iemHandleSvmNstGstEventIntercept: Xcpt intercept. u8Vector=%#x uExitInfo1=%#RX64, uExitInfo2=%#RX64 -> #VMEXIT\n",
3420 u8Vector, uExitInfo1, uExitInfo2));
3421 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + u8Vector, uExitInfo1, uExitInfo2);
3422 }
3423
3424 /* Check software interrupt (INTn) intercepts. */
3425 if ( (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
3426 | IEM_XCPT_FLAGS_BP_INSTR
3427 | IEM_XCPT_FLAGS_ICEBP_INSTR
3428 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
3429 && IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_INTN))
3430 {
3431 uint64_t const uExitInfo1 = IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmDecodeAssist ? u8Vector : 0;
3432 Log2(("iemHandleSvmNstGstEventIntercept: Software INT intercept (u8Vector=%#x) -> #VMEXIT\n", u8Vector));
3433 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_SWINT, uExitInfo1, 0 /* uExitInfo2 */);
3434 }
3435
3436 return VINF_HM_INTERCEPT_NOT_ACTIVE;
3437}
3438#endif
3439
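/*
 * Illustrative only: the expected calling pattern for the nested-guest
 * intercept helper above, roughly as done by the generic exception raising
 * code.  VINF_HM_INTERCEPT_NOT_ACTIVE means no #VMEXIT is pending and delivery
 * of the event to the nested guest should simply continue:
 *
 *  #ifdef VBOX_WITH_NESTED_HWVIRT
 *      if (IEM_IS_SVM_ENABLED(pVCpu))
 *      {
 *          VBOXSTRICTRC rcStrict0 = iemHandleSvmNstGstEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
 *          if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
 *              return rcStrict0;
 *      }
 *  #endif
 */
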
3440/**
3441 * Validates a new SS segment.
3442 *
3443 * @returns VBox strict status code.
3444 * @param pVCpu The cross context virtual CPU structure of the
3445 * calling thread.
3446 * @param pCtx The CPU context.
3447 * @param NewSS The new SS selector.
3448 * @param uCpl The CPL to load the stack for.
3449 * @param pDesc Where to return the descriptor.
3450 */
3451IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3452{
3453 NOREF(pCtx);
3454
3455 /* Null selectors are not allowed (we're not called for dispatching
3456 interrupts with SS=0 in long mode). */
3457 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3458 {
3459 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3460 return iemRaiseTaskSwitchFault0(pVCpu);
3461 }
3462
3463 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3464 if ((NewSS & X86_SEL_RPL) != uCpl)
3465 {
3466 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3467 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3468 }
3469
3470 /*
3471 * Read the descriptor.
3472 */
3473 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3474 if (rcStrict != VINF_SUCCESS)
3475 return rcStrict;
3476
3477 /*
3478 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3479 */
3480 if (!pDesc->Legacy.Gen.u1DescType)
3481 {
3482 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3483 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3484 }
3485
3486 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3487 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3488 {
3489 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3490 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3491 }
3492 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3493 {
3494 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3495 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3496 }
3497
3498 /* Is it there? */
3499 /** @todo testcase: Is this checked before the canonical / limit check below? */
3500 if (!pDesc->Legacy.Gen.u1Present)
3501 {
3502 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3503 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3504 }
3505
3506 return VINF_SUCCESS;
3507}
3508
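/*
 * Illustrative only: validating a stack selector taken from a TSS before
 * committing it, as the privilege-changing dispatch paths do.  NewSS and
 * uNewCpl are stand-ins for the caller's locals; the descriptor comes back in
 * DescSS for the caller to load base/limit/attributes from:
 *
 *      IEMSELDESC DescSS;
 *      VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint64_t const u64Base = X86DESC_BASE(&DescSS.Legacy);
 *      uint32_t const cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
 */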
3509
3510/**
3511 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3512 * not.
3513 *
3514 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3515 * @param a_pCtx The CPU context.
3516 */
3517#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3518# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3519 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \
3520 ? (a_pCtx)->eflags.u \
3521 : CPUMRawGetEFlags(a_pVCpu) )
3522#else
3523# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \
3524 ( (a_pCtx)->eflags.u )
3525#endif
3526
3527/**
3528 * Updates the EFLAGS in the correct manner wrt. PATM.
3529 *
3530 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3531 * @param a_pCtx The CPU context.
3532 * @param a_fEfl The new EFLAGS.
3533 */
3534#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3535# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3536 do { \
3537 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \
3538 (a_pCtx)->eflags.u = (a_fEfl); \
3539 else \
3540 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \
3541 } while (0)
3542#else
3543# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \
3544 do { \
3545 (a_pCtx)->eflags.u = (a_fEfl); \
3546 } while (0)
3547#endif
3548
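/*
 * Illustrative only: the typical read-modify-write pattern with the two macros
 * above (this is what the exception delivery code further down does), so that
 * PATM owned EFLAGS bits are respected in raw-mode builds:
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *      fEfl &= ~X86_EFL_IF;                // e.g. mask interrupts
 *      IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 */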
3549
3550/** @} */
3551
3552/** @name Raising Exceptions.
3553 *
3554 * @{
3555 */
3556
3557
3558/**
3559 * Loads the specified stack far pointer from the TSS.
3560 *
3561 * @returns VBox strict status code.
3562 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3563 * @param pCtx The CPU context.
3564 * @param uCpl The CPL to load the stack for.
3565 * @param pSelSS Where to return the new stack segment.
3566 * @param puEsp Where to return the new stack pointer.
3567 */
3568IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3569 PRTSEL pSelSS, uint32_t *puEsp)
3570{
3571 VBOXSTRICTRC rcStrict;
3572 Assert(uCpl < 4);
3573
3574 switch (pCtx->tr.Attr.n.u4Type)
3575 {
3576 /*
3577 * 16-bit TSS (X86TSS16).
3578 */
3579 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); /* fall thru */
3580 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3581 {
3582 uint32_t off = uCpl * 4 + 2;
3583 if (off + 4 <= pCtx->tr.u32Limit)
3584 {
3585 /** @todo check actual access pattern here. */
3586 uint32_t u32Tmp = 0; /* gcc maybe... */
3587 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3588 if (rcStrict == VINF_SUCCESS)
3589 {
3590 *puEsp = RT_LOWORD(u32Tmp);
3591 *pSelSS = RT_HIWORD(u32Tmp);
3592 return VINF_SUCCESS;
3593 }
3594 }
3595 else
3596 {
3597 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3598 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3599 }
3600 break;
3601 }
3602
3603 /*
3604 * 32-bit TSS (X86TSS32).
3605 */
3606 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); /* fall thru */
3607 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3608 {
3609 uint32_t off = uCpl * 8 + 4;
3610 if (off + 7 <= pCtx->tr.u32Limit)
3611 {
3612 /** @todo check actual access pattern here. */
3613 uint64_t u64Tmp;
3614 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3615 if (rcStrict == VINF_SUCCESS)
3616 {
3617 *puEsp = u64Tmp & UINT32_MAX;
3618 *pSelSS = (RTSEL)(u64Tmp >> 32);
3619 return VINF_SUCCESS;
3620 }
3621 }
3622 else
3623 {
3624 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pCtx->tr.u32Limit));
3625 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3626 }
3627 break;
3628 }
3629
3630 default:
3631 AssertFailed();
3632 rcStrict = VERR_IEM_IPE_4;
3633 break;
3634 }
3635
3636 *puEsp = 0; /* make gcc happy */
3637 *pSelSS = 0; /* make gcc happy */
3638 return rcStrict;
3639}
3640
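/*
 * Illustrative only: a sketch of how the privilege-changing interrupt path
 * uses the helper above; the offset math in it selects SSn:ESPn (32-bit TSS,
 * qword at uCpl * 8 + 4) resp. SSn:SPn (16-bit TSS, dword at uCpl * 4 + 2)
 * for the target CPL.  uNewCpl is a stand-in for the caller's local.
 *
 *      RTSEL    NewSS   = 0;
 *      uint32_t uNewEsp = 0;
 *      VBOXSTRICTRC rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */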
3641
3642/**
3643 * Loads the specified stack pointer from the 64-bit TSS.
3644 *
3645 * @returns VBox strict status code.
3646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3647 * @param pCtx The CPU context.
3648 * @param uCpl The CPL to load the stack for.
3649 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3650 * @param puRsp Where to return the new stack pointer.
3651 */
3652IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3653{
3654 Assert(uCpl < 4);
3655 Assert(uIst < 8);
3656 *puRsp = 0; /* make gcc happy */
3657
3658 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3659
3660 uint32_t off;
3661 if (uIst)
3662 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3663 else
3664 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3665 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3666 {
3667 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3668 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3669 }
3670
3671 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3672}
3673
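/*
 * Illustrative only: picking the 64-bit stack for an interrupt/exception.  A
 * non-zero IST index taken from the IDT gate overrides the CPL based RSPn
 * slot.  uNewCpl and uIst are stand-ins for the dispatcher's locals.
 *
 *      uint64_t uNewRsp;
 *      uint8_t const uIst = 0;     // would come from the 64-bit IDT gate's IST field
 *      VBOXSTRICTRC rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, uIst, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 */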
3674
3675/**
3676 * Adjust the CPU state according to the exception being raised.
3677 *
3678 * @param pCtx The CPU context.
3679 * @param u8Vector The exception that has been raised.
3680 */
3681DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3682{
3683 switch (u8Vector)
3684 {
3685 case X86_XCPT_DB:
3686 pCtx->dr[7] &= ~X86_DR7_GD;
3687 break;
3688 /** @todo Read the AMD and Intel exception reference... */
3689 }
3690}
3691
3692
3693/**
3694 * Implements exceptions and interrupts for real mode.
3695 *
3696 * @returns VBox strict status code.
3697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3698 * @param pCtx The CPU context.
3699 * @param cbInstr The number of bytes to offset rIP by in the return
3700 * address.
3701 * @param u8Vector The interrupt / exception vector number.
3702 * @param fFlags The flags.
3703 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3704 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3705 */
3706IEM_STATIC VBOXSTRICTRC
3707iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3708 PCPUMCTX pCtx,
3709 uint8_t cbInstr,
3710 uint8_t u8Vector,
3711 uint32_t fFlags,
3712 uint16_t uErr,
3713 uint64_t uCr2)
3714{
3715 AssertReturn(pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT, VERR_IEM_IPE_6);
3716 NOREF(uErr); NOREF(uCr2);
3717
3718 /*
3719 * Read the IDT entry.
3720 */
3721 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3722 {
3723 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3724 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3725 }
3726 RTFAR16 Idte;
3727 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3728 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3729 return rcStrict;
3730
3731 /*
3732 * Push the stack frame.
3733 */
3734 uint16_t *pu16Frame;
3735 uint64_t uNewRsp;
3736 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3737 if (rcStrict != VINF_SUCCESS)
3738 return rcStrict;
3739
3740 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3741#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3742 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3743 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3744 fEfl |= UINT16_C(0xf000);
3745#endif
3746 pu16Frame[2] = (uint16_t)fEfl;
3747 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3748 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3749 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3750 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3751 return rcStrict;
3752
3753 /*
3754 * Load the vector address into cs:ip and make exception specific state
3755 * adjustments.
3756 */
3757 pCtx->cs.Sel = Idte.sel;
3758 pCtx->cs.ValidSel = Idte.sel;
3759 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3760 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3761 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3762 pCtx->rip = Idte.off;
3763 fEfl &= ~X86_EFL_IF;
3764 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3765
3766 /** @todo do we actually do this in real mode? */
3767 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3768 iemRaiseXcptAdjustState(pCtx, u8Vector);
3769
3770 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3771}
3772
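/*
 * Worked example for the real-mode path above (not executed): INT 21h with the
 * IVT at 0000:0000 reads the 4 byte far pointer at 21h * 4 = 0x84, pushes
 * FLAGS, CS and the return IP (IP + cbInstr for software interrupts) as three
 * words, clears IF and resumes at the IVT entry's sel:off.
 */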
3773
3774/**
3775 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3776 *
3777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3778 * @param pSReg Pointer to the segment register.
3779 */
3780IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3781{
3782 pSReg->Sel = 0;
3783 pSReg->ValidSel = 0;
3784 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3785 {
3786 /* VT-x (Intel 3960x) doesn't change the base and limit; it only clears and sets the following attributes */
3787 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3788 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3789 }
3790 else
3791 {
3792 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3793 /** @todo check this on AMD-V */
3794 pSReg->u64Base = 0;
3795 pSReg->u32Limit = 0;
3796 }
3797}
3798
3799
3800/**
3801 * Loads a segment selector during a task switch in V8086 mode.
3802 *
3803 * @param pSReg Pointer to the segment register.
3804 * @param uSel The selector value to load.
3805 */
3806IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3807{
3808 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3809 pSReg->Sel = uSel;
3810 pSReg->ValidSel = uSel;
3811 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3812 pSReg->u64Base = uSel << 4;
3813 pSReg->u32Limit = 0xffff;
3814 pSReg->Attr.u = 0xf3;
3815}
3816
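/*
 * Worked example for the V8086 loader above (not executed): uSel=0x1234 gives
 * the usual real/V86 style mapping of base=0x12340, limit=0xffff and
 * Attr.u=0xf3, i.e. a present, DPL=3, accessed read/write data segment.
 */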
3817
3818/**
3819 * Loads a NULL data selector into a selector register, both the hidden and
3820 * visible parts, in protected mode.
3821 *
3822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3823 * @param pSReg Pointer to the segment register.
3824 * @param uRpl The RPL.
3825 */
3826IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3827{
3828 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3829 * data selector in protected mode. */
3830 pSReg->Sel = uRpl;
3831 pSReg->ValidSel = uRpl;
3832 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3833 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3834 {
3835 /* VT-x (Intel 3960x) observed doing something like this. */
3836 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3837 pSReg->u32Limit = UINT32_MAX;
3838 pSReg->u64Base = 0;
3839 }
3840 else
3841 {
3842 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3843 pSReg->u32Limit = 0;
3844 pSReg->u64Base = 0;
3845 }
3846}
3847
3848
3849/**
3850 * Loads a segment selector during a task switch in protected mode.
3851 *
3852 * In this task switch scenario, we would throw \#TS exceptions rather than
3853 * \#GPs.
3854 *
3855 * @returns VBox strict status code.
3856 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3857 * @param pSReg Pointer to the segment register.
3858 * @param uSel The new selector value.
3859 *
3860 * @remarks This does _not_ handle CS or SS.
3861 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3862 */
3863IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3864{
3865 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3866
3867 /* Null data selector. */
3868 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3869 {
3870 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3871 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3872 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3873 return VINF_SUCCESS;
3874 }
3875
3876 /* Fetch the descriptor. */
3877 IEMSELDESC Desc;
3878 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3879 if (rcStrict != VINF_SUCCESS)
3880 {
3881 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3882 VBOXSTRICTRC_VAL(rcStrict)));
3883 return rcStrict;
3884 }
3885
3886 /* Must be a data segment or readable code segment. */
3887 if ( !Desc.Legacy.Gen.u1DescType
3888 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3889 {
3890 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3891 Desc.Legacy.Gen.u4Type));
3892 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3893 }
3894
3895 /* Check privileges for data segments and non-conforming code segments. */
3896 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3897 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3898 {
3899 /* The RPL and the new CPL must be less than or equal to the DPL. */
3900 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3901 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3902 {
3903 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3904 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3905 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3906 }
3907 }
3908
3909 /* Is it there? */
3910 if (!Desc.Legacy.Gen.u1Present)
3911 {
3912 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3913 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3914 }
3915
3916 /* The base and limit. */
3917 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3918 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3919
3920 /*
3921 * Ok, everything checked out fine. Now set the accessed bit before
3922 * committing the result into the registers.
3923 */
3924 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3925 {
3926 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3927 if (rcStrict != VINF_SUCCESS)
3928 return rcStrict;
3929 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3930 }
3931
3932 /* Commit */
3933 pSReg->Sel = uSel;
3934 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3935 pSReg->u32Limit = cbLimit;
3936 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3937 pSReg->ValidSel = uSel;
3938 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3939 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
3940 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3941
3942 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3943 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3944 return VINF_SUCCESS;
3945}
3946
3947
3948/**
3949 * Performs a task switch.
3950 *
3951 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3952 * caller is responsible for performing the necessary checks (like DPL, TSS
3953 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3954 * reference for JMP, CALL, IRET.
3955 *
3956 * If the task switch is due to a software interrupt or hardware exception,
3957 * the caller is responsible for validating the TSS selector and descriptor. See
3958 * Intel Instruction reference for INT n.
3959 *
3960 * @returns VBox strict status code.
3961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3962 * @param pCtx The CPU context.
3963 * @param enmTaskSwitch What caused this task switch.
3964 * @param uNextEip The EIP effective after the task switch.
3965 * @param fFlags The flags.
3966 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3967 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3968 * @param SelTSS The TSS selector of the new task.
3969 * @param pNewDescTSS Pointer to the new TSS descriptor.
3970 */
3971IEM_STATIC VBOXSTRICTRC
3972iemTaskSwitch(PVMCPU pVCpu,
3973 PCPUMCTX pCtx,
3974 IEMTASKSWITCH enmTaskSwitch,
3975 uint32_t uNextEip,
3976 uint32_t fFlags,
3977 uint16_t uErr,
3978 uint64_t uCr2,
3979 RTSEL SelTSS,
3980 PIEMSELDESC pNewDescTSS)
3981{
3982 Assert(!IEM_IS_REAL_MODE(pVCpu));
3983 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3984
3985 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3986 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3987 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3988 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3989 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3990
3991 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3992 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3993
3994 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3995 fIsNewTSS386, pCtx->eip, uNextEip));
3996
3997 /* Update CR2 in case it's a page-fault. */
3998 /** @todo This should probably be done much earlier in IEM/PGM. See
3999 * @bugref{5653#c49}. */
4000 if (fFlags & IEM_XCPT_FLAGS_CR2)
4001 pCtx->cr2 = uCr2;
4002
4003 /*
4004 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4005 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4006 */
4007 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4008 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4009 if (uNewTSSLimit < uNewTSSLimitMin)
4010 {
4011 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4012 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4013 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4014 }
4015
4016 /*
4017 * Check the current TSS limit. The last bytes written to the current TSS during the
4018 * task switch are 2 bytes at offset 0x5C (32-bit TSS) and 1 byte at offset 0x28 (16-bit TSS).
4019 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4020 *
4021 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4022 * end up with smaller than "legal" TSS limits.
4023 */
4024 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
4025 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4026 if (uCurTSSLimit < uCurTSSLimitMin)
4027 {
4028 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4029 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4030 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4031 }
4032
4033 /*
4034 * Verify that the new TSS can be accessed and map it. Map only the required contents
4035 * and not the entire TSS.
4036 */
4037 void *pvNewTSS;
4038 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4039 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4040 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4041 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4042 * not perform correct translation if this happens. See Intel spec. 7.2.1
4043 * "Task-State Segment" */
4044 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4045 if (rcStrict != VINF_SUCCESS)
4046 {
4047 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4048 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4049 return rcStrict;
4050 }
4051
4052 /*
4053 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4054 */
4055 uint32_t u32EFlags = pCtx->eflags.u32;
4056 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4057 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4058 {
4059 PX86DESC pDescCurTSS;
4060 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4061 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4062 if (rcStrict != VINF_SUCCESS)
4063 {
4064 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4065 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4066 return rcStrict;
4067 }
4068
4069 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4070 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4071 if (rcStrict != VINF_SUCCESS)
4072 {
4073 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4074 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4075 return rcStrict;
4076 }
4077
4078 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4079 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4080 {
4081 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4082 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4083 u32EFlags &= ~X86_EFL_NT;
4084 }
4085 }
4086
4087 /*
4088 * Save the CPU state into the current TSS.
4089 */
4090 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
4091 if (GCPtrNewTSS == GCPtrCurTSS)
4092 {
4093 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4094 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4095 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4096 }
4097 if (fIsNewTSS386)
4098 {
4099 /*
4100 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4101 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4102 */
4103 void *pvCurTSS32;
4104 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4105 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4106 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4107 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4108 if (rcStrict != VINF_SUCCESS)
4109 {
4110 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4111 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4112 return rcStrict;
4113 }
4114
4115 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4116 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4117 pCurTSS32->eip = uNextEip;
4118 pCurTSS32->eflags = u32EFlags;
4119 pCurTSS32->eax = pCtx->eax;
4120 pCurTSS32->ecx = pCtx->ecx;
4121 pCurTSS32->edx = pCtx->edx;
4122 pCurTSS32->ebx = pCtx->ebx;
4123 pCurTSS32->esp = pCtx->esp;
4124 pCurTSS32->ebp = pCtx->ebp;
4125 pCurTSS32->esi = pCtx->esi;
4126 pCurTSS32->edi = pCtx->edi;
4127 pCurTSS32->es = pCtx->es.Sel;
4128 pCurTSS32->cs = pCtx->cs.Sel;
4129 pCurTSS32->ss = pCtx->ss.Sel;
4130 pCurTSS32->ds = pCtx->ds.Sel;
4131 pCurTSS32->fs = pCtx->fs.Sel;
4132 pCurTSS32->gs = pCtx->gs.Sel;
4133
4134 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4135 if (rcStrict != VINF_SUCCESS)
4136 {
4137 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4138 VBOXSTRICTRC_VAL(rcStrict)));
4139 return rcStrict;
4140 }
4141 }
4142 else
4143 {
4144 /*
4145 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4146 */
4147 void *pvCurTSS16;
4148 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4149 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4150 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4151 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4152 if (rcStrict != VINF_SUCCESS)
4153 {
4154 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4155 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4156 return rcStrict;
4157 }
4158
4159 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4160 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4161 pCurTSS16->ip = uNextEip;
4162 pCurTSS16->flags = u32EFlags;
4163 pCurTSS16->ax = pCtx->ax;
4164 pCurTSS16->cx = pCtx->cx;
4165 pCurTSS16->dx = pCtx->dx;
4166 pCurTSS16->bx = pCtx->bx;
4167 pCurTSS16->sp = pCtx->sp;
4168 pCurTSS16->bp = pCtx->bp;
4169 pCurTSS16->si = pCtx->si;
4170 pCurTSS16->di = pCtx->di;
4171 pCurTSS16->es = pCtx->es.Sel;
4172 pCurTSS16->cs = pCtx->cs.Sel;
4173 pCurTSS16->ss = pCtx->ss.Sel;
4174 pCurTSS16->ds = pCtx->ds.Sel;
4175
4176 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4177 if (rcStrict != VINF_SUCCESS)
4178 {
4179 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4180 VBOXSTRICTRC_VAL(rcStrict)));
4181 return rcStrict;
4182 }
4183 }
4184
4185 /*
4186 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4187 */
4188 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4189 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4190 {
4191 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4192 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4193 pNewTSS->selPrev = pCtx->tr.Sel;
4194 }
4195
4196 /*
4197 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4198 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4199 */
4200 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4201 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4202 bool fNewDebugTrap;
4203 if (fIsNewTSS386)
4204 {
4205 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4206 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4207 uNewEip = pNewTSS32->eip;
4208 uNewEflags = pNewTSS32->eflags;
4209 uNewEax = pNewTSS32->eax;
4210 uNewEcx = pNewTSS32->ecx;
4211 uNewEdx = pNewTSS32->edx;
4212 uNewEbx = pNewTSS32->ebx;
4213 uNewEsp = pNewTSS32->esp;
4214 uNewEbp = pNewTSS32->ebp;
4215 uNewEsi = pNewTSS32->esi;
4216 uNewEdi = pNewTSS32->edi;
4217 uNewES = pNewTSS32->es;
4218 uNewCS = pNewTSS32->cs;
4219 uNewSS = pNewTSS32->ss;
4220 uNewDS = pNewTSS32->ds;
4221 uNewFS = pNewTSS32->fs;
4222 uNewGS = pNewTSS32->gs;
4223 uNewLdt = pNewTSS32->selLdt;
4224 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4225 }
4226 else
4227 {
4228 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4229 uNewCr3 = 0;
4230 uNewEip = pNewTSS16->ip;
4231 uNewEflags = pNewTSS16->flags;
4232 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4233 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4234 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4235 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4236 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4237 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4238 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4239 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4240 uNewES = pNewTSS16->es;
4241 uNewCS = pNewTSS16->cs;
4242 uNewSS = pNewTSS16->ss;
4243 uNewDS = pNewTSS16->ds;
4244 uNewFS = 0;
4245 uNewGS = 0;
4246 uNewLdt = pNewTSS16->selLdt;
4247 fNewDebugTrap = false;
4248 }
4249
4250 if (GCPtrNewTSS == GCPtrCurTSS)
4251 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4252 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4253
4254 /*
4255 * We're done accessing the new TSS.
4256 */
4257 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4258 if (rcStrict != VINF_SUCCESS)
4259 {
4260 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4261 return rcStrict;
4262 }
4263
4264 /*
4265 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4266 */
4267 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4268 {
4269 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4270 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4271 if (rcStrict != VINF_SUCCESS)
4272 {
4273 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4274 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4275 return rcStrict;
4276 }
4277
4278 /* Check that the descriptor indicates the new TSS is available (not busy). */
4279 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4280 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4281 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4282
4283 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4284 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4285 if (rcStrict != VINF_SUCCESS)
4286 {
4287 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4288 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4289 return rcStrict;
4290 }
4291 }
4292
4293 /*
4294 * From this point on, we're technically in the new task. We will defer exceptions
4295 * until the completion of the task switch but before executing any instructions in the new task.
4296 */
4297 pCtx->tr.Sel = SelTSS;
4298 pCtx->tr.ValidSel = SelTSS;
4299 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4300 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4301 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4302 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4303 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4304
4305 /* Set the busy bit in TR. */
4306 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4307 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4308 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4309 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4310 {
4311 uNewEflags |= X86_EFL_NT;
4312 }
4313
4314 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4315 pCtx->cr0 |= X86_CR0_TS;
4316 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4317
4318 pCtx->eip = uNewEip;
4319 pCtx->eax = uNewEax;
4320 pCtx->ecx = uNewEcx;
4321 pCtx->edx = uNewEdx;
4322 pCtx->ebx = uNewEbx;
4323 pCtx->esp = uNewEsp;
4324 pCtx->ebp = uNewEbp;
4325 pCtx->esi = uNewEsi;
4326 pCtx->edi = uNewEdi;
4327
4328 uNewEflags &= X86_EFL_LIVE_MASK;
4329 uNewEflags |= X86_EFL_RA1_MASK;
4330 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4331
4332 /*
4333 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4334 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4335 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4336 */
4337 pCtx->es.Sel = uNewES;
4338 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4339
4340 pCtx->cs.Sel = uNewCS;
4341 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4342
4343 pCtx->ss.Sel = uNewSS;
4344 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4345
4346 pCtx->ds.Sel = uNewDS;
4347 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4348
4349 pCtx->fs.Sel = uNewFS;
4350 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4351
4352 pCtx->gs.Sel = uNewGS;
4353 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4354 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4355
4356 pCtx->ldtr.Sel = uNewLdt;
4357 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4358 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4359 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4360
4361 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4362 {
4363 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4364 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4365 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4366 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4367 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4368 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4369 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4370 }
4371
4372 /*
4373 * Switch CR3 for the new task.
4374 */
4375 if ( fIsNewTSS386
4376 && (pCtx->cr0 & X86_CR0_PG))
4377 {
4378 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4379 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4380 {
4381 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4382 AssertRCSuccessReturn(rc, rc);
4383 }
4384 else
4385 pCtx->cr3 = uNewCr3;
4386
4387 /* Inform PGM. */
4388 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
4389 {
4390 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4391 AssertRCReturn(rc, rc);
4392 /* ignore informational status codes */
4393 }
4394 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4395 }
4396
4397 /*
4398 * Switch LDTR for the new task.
4399 */
4400 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4401 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4402 else
4403 {
4404 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4405
4406 IEMSELDESC DescNewLdt;
4407 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4408 if (rcStrict != VINF_SUCCESS)
4409 {
4410 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4411 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4412 return rcStrict;
4413 }
4414 if ( !DescNewLdt.Legacy.Gen.u1Present
4415 || DescNewLdt.Legacy.Gen.u1DescType
4416 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4417 {
4418 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4419 uNewLdt, DescNewLdt.Legacy.u));
4420 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4421 }
4422
4423 pCtx->ldtr.ValidSel = uNewLdt;
4424 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4425 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4426 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4427 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4428 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4429 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4430 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4431 }
4432
4433 IEMSELDESC DescSS;
4434 if (IEM_IS_V86_MODE(pVCpu))
4435 {
4436 pVCpu->iem.s.uCpl = 3;
4437 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4438 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4439 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4440 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4441 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4442 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4443
4444 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4445 DescSS.Legacy.u = 0;
4446 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4447 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4448 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4449 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4450 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4451 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4452 DescSS.Legacy.Gen.u2Dpl = 3;
4453 }
4454 else
4455 {
4456 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4457
4458 /*
4459 * Load the stack segment for the new task.
4460 */
4461 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4462 {
4463 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4464 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4465 }
4466
4467 /* Fetch the descriptor. */
4468 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4469 if (rcStrict != VINF_SUCCESS)
4470 {
4471 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4472 VBOXSTRICTRC_VAL(rcStrict)));
4473 return rcStrict;
4474 }
4475
4476 /* SS must be a data segment and writable. */
4477 if ( !DescSS.Legacy.Gen.u1DescType
4478 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4479 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4480 {
4481 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4482 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4483 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4484 }
4485
4486 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4487 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4488 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4489 {
4490 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4491 uNewCpl));
4492 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4493 }
4494
4495 /* Is it there? */
4496 if (!DescSS.Legacy.Gen.u1Present)
4497 {
4498 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4499 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4500 }
4501
4502 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4503 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4504
4505 /* Set the accessed bit before committing the result into SS. */
4506 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4507 {
4508 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4509 if (rcStrict != VINF_SUCCESS)
4510 return rcStrict;
4511 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4512 }
4513
4514 /* Commit SS. */
4515 pCtx->ss.Sel = uNewSS;
4516 pCtx->ss.ValidSel = uNewSS;
4517 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4518 pCtx->ss.u32Limit = cbLimit;
4519 pCtx->ss.u64Base = u64Base;
4520 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4521 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4522
4523 /* CPL has changed, update IEM before loading rest of segments. */
4524 pVCpu->iem.s.uCpl = uNewCpl;
4525
4526 /*
4527 * Load the data segments for the new task.
4528 */
4529 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4530 if (rcStrict != VINF_SUCCESS)
4531 return rcStrict;
4532 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4533 if (rcStrict != VINF_SUCCESS)
4534 return rcStrict;
4535 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4536 if (rcStrict != VINF_SUCCESS)
4537 return rcStrict;
4538 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4539 if (rcStrict != VINF_SUCCESS)
4540 return rcStrict;
4541
4542 /*
4543 * Load the code segment for the new task.
4544 */
4545 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4546 {
4547 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4548 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4549 }
4550
4551 /* Fetch the descriptor. */
4552 IEMSELDESC DescCS;
4553 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4554 if (rcStrict != VINF_SUCCESS)
4555 {
4556 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4557 return rcStrict;
4558 }
4559
4560 /* CS must be a code segment. */
4561 if ( !DescCS.Legacy.Gen.u1DescType
4562 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4563 {
4564 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4565 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4566 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4567 }
4568
4569 /* For conforming CS, DPL must be less than or equal to the RPL. */
4570 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4571 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4572 {
4573 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4574 DescCS.Legacy.Gen.u2Dpl));
4575 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4576 }
4577
4578 /* For non-conforming CS, DPL must match RPL. */
4579 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4580 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4581 {
4582 Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4583 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4584 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4585 }
4586
4587 /* Is it there? */
4588 if (!DescCS.Legacy.Gen.u1Present)
4589 {
4590 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4591 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4592 }
4593
4594 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4595 u64Base = X86DESC_BASE(&DescCS.Legacy);
4596
4597 /* Set the accessed bit before committing the result into CS. */
4598 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4599 {
4600 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4601 if (rcStrict != VINF_SUCCESS)
4602 return rcStrict;
4603 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4604 }
4605
4606 /* Commit CS. */
4607 pCtx->cs.Sel = uNewCS;
4608 pCtx->cs.ValidSel = uNewCS;
4609 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4610 pCtx->cs.u32Limit = cbLimit;
4611 pCtx->cs.u64Base = u64Base;
4612 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4613 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4614 }
4615
4616 /** @todo Debug trap. */
4617 if (fIsNewTSS386 && fNewDebugTrap)
4618 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4619
4620 /*
4621 * Construct the error code masks based on what caused this task switch.
4622 * See Intel Instruction reference for INT.
4623 */
4624 uint16_t uExt;
4625 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4626 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4627 {
4628 uExt = 1;
4629 }
4630 else
4631 uExt = 0;
4632
4633 /*
4634 * Push any error code on to the new stack.
4635 */
4636 if (fFlags & IEM_XCPT_FLAGS_ERR)
4637 {
4638 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4639 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4640 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4641
4642 /* Check that there is sufficient space on the stack. */
4643 /** @todo Factor out segment limit checking for normal/expand down segments
4644 * into a separate function. */
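   /* Note: for a normal (expand-up) stack segment valid offsets are 0..limit, so
      ESP-1 must not exceed the limit and ESP must leave room for the push; for an
      expand-down segment valid offsets are limit+1..0xffff (0xffffffff when the
      big/default bit is set), which is what the else branch below checks. */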
4645 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4646 {
4647 if ( pCtx->esp - 1 > cbLimitSS
4648 || pCtx->esp < cbStackFrame)
4649 {
4650 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4651 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4652 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4653 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4654 }
4655 }
4656 else
4657 {
4658 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4659 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4660 {
4661 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4662 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4663 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4664 }
4665 }
4666
4667
4668 if (fIsNewTSS386)
4669 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4670 else
4671 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4672 if (rcStrict != VINF_SUCCESS)
4673 {
4674 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4675 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4676 return rcStrict;
4677 }
4678 }
4679
4680 /* Check the new EIP against the new CS limit. */
4681 if (pCtx->eip > pCtx->cs.u32Limit)
4682 {
4683 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4684 pCtx->eip, pCtx->cs.u32Limit));
4685 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4686 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4687 }
4688
4689 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4690 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4691}
4692
4693
4694/**
4695 * Implements exceptions and interrupts for protected mode.
4696 *
4697 * @returns VBox strict status code.
4698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4699 * @param pCtx The CPU context.
4700 * @param cbInstr The number of bytes to offset rIP by in the return
4701 * address.
4702 * @param u8Vector The interrupt / exception vector number.
4703 * @param fFlags The flags.
4704 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4705 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4706 */
4707IEM_STATIC VBOXSTRICTRC
4708iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4709 PCPUMCTX pCtx,
4710 uint8_t cbInstr,
4711 uint8_t u8Vector,
4712 uint32_t fFlags,
4713 uint16_t uErr,
4714 uint64_t uCr2)
4715{
4716 /*
4717 * Read the IDT entry.
4718 */
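   /* Note: protected-mode IDT gate descriptors are 8 bytes each, hence the
      8 * u8Vector offset used below.  IDT-related faults carry an error code
      with the vector number in the selector-index field and the IDT bit set. */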
4719 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4720 {
4721 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4722 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4723 }
4724 X86DESC Idte;
4725 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4726 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4727 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4728 return rcStrict;
4729 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4730 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4731 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4732
4733 /*
4734 * Check the descriptor type, DPL and such.
4735 * ASSUMES this is done in the same order as described for call-gate calls.
4736 */
4737 if (Idte.Gate.u1DescType)
4738 {
4739 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4740 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4741 }
4742 bool fTaskGate = false;
4743 uint8_t f32BitGate = true;
4744 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4745 switch (Idte.Gate.u4Type)
4746 {
4747 case X86_SEL_TYPE_SYS_UNDEFINED:
4748 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4749 case X86_SEL_TYPE_SYS_LDT:
4750 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4751 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4752 case X86_SEL_TYPE_SYS_UNDEFINED2:
4753 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4754 case X86_SEL_TYPE_SYS_UNDEFINED3:
4755 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4756 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4757 case X86_SEL_TYPE_SYS_UNDEFINED4:
4758 {
4759 /** @todo check what actually happens when the type is wrong...
4760 * esp. call gates. */
4761 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4762 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4763 }
4764
4765 case X86_SEL_TYPE_SYS_286_INT_GATE:
4766 f32BitGate = false;
4767 /* fall thru */
4768 case X86_SEL_TYPE_SYS_386_INT_GATE:
4769 fEflToClear |= X86_EFL_IF;
4770 break;
4771
4772 case X86_SEL_TYPE_SYS_TASK_GATE:
4773 fTaskGate = true;
4774#ifndef IEM_IMPLEMENTS_TASKSWITCH
4775 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4776#endif
4777 break;
4778
4779 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4780 f32BitGate = false;
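            /* fall thru */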
4781 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4782 break;
4783
4784 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4785 }
4786
4787 /* Check DPL against CPL if applicable. */
4788 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4789 {
4790 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4791 {
4792 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4793 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4794 }
4795 }
4796
4797 /* Is it there? */
4798 if (!Idte.Gate.u1Present)
4799 {
4800 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4801 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4802 }
4803
4804 /* Is it a task-gate? */
4805 if (fTaskGate)
4806 {
4807 /*
4808 * Construct the error code masks based on what caused this task switch.
4809 * See Intel Instruction reference for INT.
4810 */
4811 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4812 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4813 RTSEL SelTSS = Idte.Gate.u16Sel;
4814
4815 /*
4816 * Fetch the TSS descriptor in the GDT.
4817 */
4818 IEMSELDESC DescTSS;
4819 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4820 if (rcStrict != VINF_SUCCESS)
4821 {
4822 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4823 VBOXSTRICTRC_VAL(rcStrict)));
4824 return rcStrict;
4825 }
4826
4827 /* The TSS descriptor must be a system segment and be available (not busy). */
4828 if ( DescTSS.Legacy.Gen.u1DescType
4829 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4830 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4831 {
4832 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4833 u8Vector, SelTSS, DescTSS.Legacy.au64));
4834 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4835 }
4836
4837 /* The TSS must be present. */
4838 if (!DescTSS.Legacy.Gen.u1Present)
4839 {
4840 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4841 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4842 }
4843
4844 /* Do the actual task switch. */
4845 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4846 }
4847
4848 /* A null CS is bad. */
4849 RTSEL NewCS = Idte.Gate.u16Sel;
4850 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4851 {
4852 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4853 return iemRaiseGeneralProtectionFault0(pVCpu);
4854 }
4855
4856 /* Fetch the descriptor for the new CS. */
4857 IEMSELDESC DescCS;
4858 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4859 if (rcStrict != VINF_SUCCESS)
4860 {
4861 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4862 return rcStrict;
4863 }
4864
4865 /* Must be a code segment. */
4866 if (!DescCS.Legacy.Gen.u1DescType)
4867 {
4868 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4869 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4870 }
4871 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4872 {
4873 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4874 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4875 }
4876
4877 /* Don't allow lowering the privilege level. */
4878 /** @todo Does the lowering of privileges apply to software interrupts
4879 * only? This has a bearing on the more-privileged or
4880 * same-privilege stack behavior further down. A testcase would
4881 * be nice. */
4882 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4883 {
4884 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4885 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4886 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4887 }
4888
4889 /* Make sure the selector is present. */
4890 if (!DescCS.Legacy.Gen.u1Present)
4891 {
4892 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4893 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4894 }
4895
4896 /* Check the new EIP against the new CS limit. */
4897 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4898 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4899 ? Idte.Gate.u16OffsetLow
4900 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4901 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4902 if (uNewEip > cbLimitCS)
4903 {
4904 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4905 u8Vector, uNewEip, cbLimitCS, NewCS));
4906 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4907 }
4908 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4909
4910 /* Calc the flag image to push. */
4911 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4912 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4913 fEfl &= ~X86_EFL_RF;
4914 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
4915 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4916
4917 /* From V8086 mode only go to CPL 0. */
4918 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4919 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4920 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4921 {
4922 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4923 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4924 }
4925
4926 /*
4927 * If the privilege level changes, we need to get a new stack from the TSS.
4928 * This in turns means validating the new SS and ESP...
4929 */
4930 if (uNewCpl != pVCpu->iem.s.uCpl)
4931 {
4932 RTSEL NewSS;
4933 uint32_t uNewEsp;
4934 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4935 if (rcStrict != VINF_SUCCESS)
4936 return rcStrict;
4937
4938 IEMSELDESC DescSS;
4939 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4940 if (rcStrict != VINF_SUCCESS)
4941 return rcStrict;
4942 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4943 if (!DescSS.Legacy.Gen.u1DefBig)
4944 {
4945 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4946 uNewEsp = (uint16_t)uNewEsp;
4947 }
4948
4949 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4950
4951 /* Check that there is sufficient space for the stack frame. */
4952 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
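   /* Frame layout: EIP, CS, EFLAGS, old ESP and old SS (5 entries, 6 with an
      error code); when interrupting V8086 code, ES, DS, FS and GS are pushed as
      well.  Entries are 2 bytes for a 16-bit gate; '<< f32BitGate' doubles the
      size for a 32-bit gate. */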
4953 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4954 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4955 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4956
4957 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4958 {
4959 if ( uNewEsp - 1 > cbLimitSS
4960 || uNewEsp < cbStackFrame)
4961 {
4962 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4963 u8Vector, NewSS, uNewEsp, cbStackFrame));
4964 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4965 }
4966 }
4967 else
4968 {
4969 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4970 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4971 {
4972 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4973 u8Vector, NewSS, uNewEsp, cbStackFrame));
4974 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4975 }
4976 }
4977
4978 /*
4979 * Start making changes.
4980 */
4981
4982 /* Set the new CPL so that stack accesses use it. */
4983 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4984 pVCpu->iem.s.uCpl = uNewCpl;
4985
4986 /* Create the stack frame. */
4987 RTPTRUNION uStackFrame;
4988 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4989 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4990 if (rcStrict != VINF_SUCCESS)
4991 return rcStrict;
4992 void * const pvStackFrame = uStackFrame.pv;
4993 if (f32BitGate)
4994 {
4995 if (fFlags & IEM_XCPT_FLAGS_ERR)
4996 *uStackFrame.pu32++ = uErr;
4997 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4998 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4999 uStackFrame.pu32[2] = fEfl;
5000 uStackFrame.pu32[3] = pCtx->esp;
5001 uStackFrame.pu32[4] = pCtx->ss.Sel;
5002 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
5003 if (fEfl & X86_EFL_VM)
5004 {
5005 uStackFrame.pu32[1] = pCtx->cs.Sel;
5006 uStackFrame.pu32[5] = pCtx->es.Sel;
5007 uStackFrame.pu32[6] = pCtx->ds.Sel;
5008 uStackFrame.pu32[7] = pCtx->fs.Sel;
5009 uStackFrame.pu32[8] = pCtx->gs.Sel;
5010 }
5011 }
5012 else
5013 {
5014 if (fFlags & IEM_XCPT_FLAGS_ERR)
5015 *uStackFrame.pu16++ = uErr;
5016 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
5017 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5018 uStackFrame.pu16[2] = fEfl;
5019 uStackFrame.pu16[3] = pCtx->sp;
5020 uStackFrame.pu16[4] = pCtx->ss.Sel;
5021 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
5022 if (fEfl & X86_EFL_VM)
5023 {
5024 uStackFrame.pu16[1] = pCtx->cs.Sel;
5025 uStackFrame.pu16[5] = pCtx->es.Sel;
5026 uStackFrame.pu16[6] = pCtx->ds.Sel;
5027 uStackFrame.pu16[7] = pCtx->fs.Sel;
5028 uStackFrame.pu16[8] = pCtx->gs.Sel;
5029 }
5030 }
5031 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5032 if (rcStrict != VINF_SUCCESS)
5033 return rcStrict;
5034
5035 /* Mark the selectors 'accessed' (hope this is the correct time). */
5036 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5037 * after pushing the stack frame? (Write protect the gdt + stack to
5038 * find out.) */
5039 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5040 {
5041 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5042 if (rcStrict != VINF_SUCCESS)
5043 return rcStrict;
5044 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5045 }
5046
5047 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5048 {
5049 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5050 if (rcStrict != VINF_SUCCESS)
5051 return rcStrict;
5052 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5053 }
5054
5055 /*
5056 * Start committing the register changes (joins with the DPL=CPL branch).
5057 */
5058 pCtx->ss.Sel = NewSS;
5059 pCtx->ss.ValidSel = NewSS;
5060 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5061 pCtx->ss.u32Limit = cbLimitSS;
5062 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5063 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5064 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5065 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5066 * SP is loaded).
5067 * Need to check the other combinations too:
5068 * - 16-bit TSS, 32-bit handler
5069 * - 32-bit TSS, 16-bit handler */
5070 if (!pCtx->ss.Attr.n.u1DefBig)
5071 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
5072 else
5073 pCtx->rsp = uNewEsp - cbStackFrame;
5074
5075 if (fEfl & X86_EFL_VM)
5076 {
5077 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
5078 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
5079 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
5080 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
5081 }
5082 }
5083 /*
5084 * Same privilege, no stack change and smaller stack frame.
5085 */
5086 else
5087 {
5088 uint64_t uNewRsp;
5089 RTPTRUNION uStackFrame;
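   /* Same-privilege frame: only EIP/IP, CS and EFLAGS (plus an optional error
      code), i.e. 6 or 8 bytes for a 16-bit gate, doubled for a 32-bit gate. */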
5090 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5091 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5092 if (rcStrict != VINF_SUCCESS)
5093 return rcStrict;
5094 void * const pvStackFrame = uStackFrame.pv;
5095
5096 if (f32BitGate)
5097 {
5098 if (fFlags & IEM_XCPT_FLAGS_ERR)
5099 *uStackFrame.pu32++ = uErr;
5100 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5101 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5102 uStackFrame.pu32[2] = fEfl;
5103 }
5104 else
5105 {
5106 if (fFlags & IEM_XCPT_FLAGS_ERR)
5107 *uStackFrame.pu16++ = uErr;
5108 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5109 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5110 uStackFrame.pu16[2] = fEfl;
5111 }
5112 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5113 if (rcStrict != VINF_SUCCESS)
5114 return rcStrict;
5115
5116 /* Mark the CS selector as 'accessed'. */
5117 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5118 {
5119 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5120 if (rcStrict != VINF_SUCCESS)
5121 return rcStrict;
5122 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5123 }
5124
5125 /*
5126 * Start committing the register changes (joins with the other branch).
5127 */
5128 pCtx->rsp = uNewRsp;
5129 }
5130
5131 /* ... register committing continues. */
5132 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5133 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5134 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5135 pCtx->cs.u32Limit = cbLimitCS;
5136 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5137 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5138
5139 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5140 fEfl &= ~fEflToClear;
5141 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5142
5143 if (fFlags & IEM_XCPT_FLAGS_CR2)
5144 pCtx->cr2 = uCr2;
5145
5146 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5147 iemRaiseXcptAdjustState(pCtx, u8Vector);
5148
5149 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5150}
5151
5152
5153/**
5154 * Implements exceptions and interrupts for long mode.
5155 *
5156 * @returns VBox strict status code.
5157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5158 * @param pCtx The CPU context.
5159 * @param cbInstr The number of bytes to offset rIP by in the return
5160 * address.
5161 * @param u8Vector The interrupt / exception vector number.
5162 * @param fFlags The flags.
5163 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5164 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5165 */
5166IEM_STATIC VBOXSTRICTRC
5167iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5168 PCPUMCTX pCtx,
5169 uint8_t cbInstr,
5170 uint8_t u8Vector,
5171 uint32_t fFlags,
5172 uint16_t uErr,
5173 uint64_t uCr2)
5174{
5175 /*
5176 * Read the IDT entry.
5177 */
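   /* Note: long-mode IDT gate descriptors are 16 bytes each (hence the << 4);
      they are fetched below as two 8-byte halves. */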
5178 uint16_t offIdt = (uint16_t)u8Vector << 4;
5179 if (pCtx->idtr.cbIdt < offIdt + 7)
5180 {
5181 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5182 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5183 }
5184 X86DESC64 Idte;
5185 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5186 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5187 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5188 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5189 return rcStrict;
5190 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5191 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5192 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5193
5194 /*
5195 * Check the descriptor type, DPL and such.
5196 * ASSUMES this is done in the same order as described for call-gate calls.
5197 */
5198 if (Idte.Gate.u1DescType)
5199 {
5200 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5201 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5202 }
5203 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5204 switch (Idte.Gate.u4Type)
5205 {
5206 case AMD64_SEL_TYPE_SYS_INT_GATE:
5207 fEflToClear |= X86_EFL_IF;
5208 break;
5209 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5210 break;
5211
5212 default:
5213 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5214 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5215 }
5216
5217 /* Check DPL against CPL if applicable. */
5218 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5219 {
5220 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5221 {
5222 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5223 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5224 }
5225 }
5226
5227 /* Is it there? */
5228 if (!Idte.Gate.u1Present)
5229 {
5230 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5231 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5232 }
5233
5234 /* A null CS is bad. */
5235 RTSEL NewCS = Idte.Gate.u16Sel;
5236 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5237 {
5238 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5239 return iemRaiseGeneralProtectionFault0(pVCpu);
5240 }
5241
5242 /* Fetch the descriptor for the new CS. */
5243 IEMSELDESC DescCS;
5244 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5245 if (rcStrict != VINF_SUCCESS)
5246 {
5247 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5248 return rcStrict;
5249 }
5250
5251 /* Must be a 64-bit code segment. */
5252 if (!DescCS.Long.Gen.u1DescType)
5253 {
5254 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5255 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5256 }
5257 if ( !DescCS.Long.Gen.u1Long
5258 || DescCS.Long.Gen.u1DefBig
5259 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5260 {
5261 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5262 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5263 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5264 }
5265
5266 /* Don't allow lowering the privilege level. For non-conforming CS
5267 selectors, the CS.DPL sets the privilege level the trap/interrupt
5268 handler runs at. For conforming CS selectors, the CPL remains
5269 unchanged, but the CS.DPL must be <= CPL. */
5270 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5271 * when CPU in Ring-0. Result \#GP? */
5272 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5273 {
5274 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5275 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5276 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5277 }
5278
5279
5280 /* Make sure the selector is present. */
5281 if (!DescCS.Legacy.Gen.u1Present)
5282 {
5283 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5284 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5285 }
5286
5287 /* Check that the new RIP is canonical. */
5288 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5289 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5290 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5291 if (!IEM_IS_CANONICAL(uNewRip))
5292 {
5293 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5294 return iemRaiseGeneralProtectionFault0(pVCpu);
5295 }
5296
5297 /*
5298 * If the privilege level changes or if the IST isn't zero, we need to get
5299 * a new stack from the TSS.
5300 */
5301 uint64_t uNewRsp;
5302 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5303 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5304 if ( uNewCpl != pVCpu->iem.s.uCpl
5305 || Idte.Gate.u3IST != 0)
5306 {
5307 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5308 if (rcStrict != VINF_SUCCESS)
5309 return rcStrict;
5310 }
5311 else
5312 uNewRsp = pCtx->rsp;
5313 uNewRsp &= ~(uint64_t)0xf;
5314
5315 /*
5316 * Calc the flag image to push.
5317 */
5318 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5319 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5320 fEfl &= ~X86_EFL_RF;
5321 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
5322 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5323
5324 /*
5325 * Start making changes.
5326 */
5327 /* Set the new CPL so that stack accesses use it. */
5328 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5329 pVCpu->iem.s.uCpl = uNewCpl;
5330
5331 /* Create the stack frame. */
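   /* The 64-bit frame consists of SS, RSP, RFLAGS, CS and RIP, each pushed as a
      quadword, plus an optional error code quadword. */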
5332 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5333 RTPTRUNION uStackFrame;
5334 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5335 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5336 if (rcStrict != VINF_SUCCESS)
5337 return rcStrict;
5338 void * const pvStackFrame = uStackFrame.pv;
5339
5340 if (fFlags & IEM_XCPT_FLAGS_ERR)
5341 *uStackFrame.pu64++ = uErr;
5342 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5343 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5344 uStackFrame.pu64[2] = fEfl;
5345 uStackFrame.pu64[3] = pCtx->rsp;
5346 uStackFrame.pu64[4] = pCtx->ss.Sel;
5347 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5348 if (rcStrict != VINF_SUCCESS)
5349 return rcStrict;
5350
5351 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5352 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5353 * after pushing the stack frame? (Write protect the gdt + stack to
5354 * find out.) */
5355 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5356 {
5357 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5358 if (rcStrict != VINF_SUCCESS)
5359 return rcStrict;
5360 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5361 }
5362
5363 /*
5364 * Start committing the register changes.
5365 */
5366 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
5367 * hidden registers when interrupting 32-bit or 16-bit code! */
5368 if (uNewCpl != uOldCpl)
5369 {
5370 pCtx->ss.Sel = 0 | uNewCpl;
5371 pCtx->ss.ValidSel = 0 | uNewCpl;
5372 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5373 pCtx->ss.u32Limit = UINT32_MAX;
5374 pCtx->ss.u64Base = 0;
5375 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5376 }
5377 pCtx->rsp = uNewRsp - cbStackFrame;
5378 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5379 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5380 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5381 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5382 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5383 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5384 pCtx->rip = uNewRip;
5385
5386 fEfl &= ~fEflToClear;
5387 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5388
5389 if (fFlags & IEM_XCPT_FLAGS_CR2)
5390 pCtx->cr2 = uCr2;
5391
5392 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5393 iemRaiseXcptAdjustState(pCtx, u8Vector);
5394
5395 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5396}
5397
5398
5399/**
5400 * Implements exceptions and interrupts.
5401 *
5402 * All exceptions and interrupts go thru this function!
5403 *
5404 * @returns VBox strict status code.
5405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5406 * @param cbInstr The number of bytes to offset rIP by in the return
5407 * address.
5408 * @param u8Vector The interrupt / exception vector number.
5409 * @param fFlags The flags.
5410 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5411 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5412 */
5413DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5414iemRaiseXcptOrInt(PVMCPU pVCpu,
5415 uint8_t cbInstr,
5416 uint8_t u8Vector,
5417 uint32_t fFlags,
5418 uint16_t uErr,
5419 uint64_t uCr2)
5420{
5421 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5422#ifdef IN_RING0
5423 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5424 AssertRCReturn(rc, rc);
5425#endif
5426
5427#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5428 /*
5429 * Flush prefetch buffer
5430 */
5431 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5432#endif
5433
5434 /*
5435 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5436 */
5437 if ( pCtx->eflags.Bits.u1VM
5438 && pCtx->eflags.Bits.u2IOPL != 3
5439 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5440 && (pCtx->cr0 & X86_CR0_PE) )
5441 {
5442 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5443 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5444 u8Vector = X86_XCPT_GP;
5445 uErr = 0;
5446 }
5447#ifdef DBGFTRACE_ENABLED
5448 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5449 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5450 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5451#endif
5452
5453#ifdef VBOX_WITH_NESTED_HWVIRT
5454 if (IEM_IS_SVM_ENABLED(pVCpu))
5455 {
5456 /*
5457 * If the event is being injected as part of VMRUN, it isn't subject to event
5458 * intercepts in the nested-guest. However, secondary exceptions that occur
5459 * during injection of any event -are- subject to exception intercepts.
5460 * See AMD spec. 15.20 "Event Injection".
5461 */
5462 if (!pCtx->hwvirt.svm.fInterceptEvents)
5463 pCtx->hwvirt.svm.fInterceptEvents = 1;
5464 else
5465 {
5466 /*
5467 * Check and handle if the event being raised is intercepted.
5468 */
5469 VBOXSTRICTRC rcStrict0 = iemHandleSvmNstGstEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5470 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5471 return rcStrict0;
5472 }
5473 }
5474#endif /* VBOX_WITH_NESTED_HWVIRT */
5475
5476 /*
5477 * Do recursion accounting.
5478 */
5479 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5480 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5481 if (pVCpu->iem.s.cXcptRecursions == 0)
5482 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5483 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5484 else
5485 {
5486 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5487 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5488 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5489
5490 if (pVCpu->iem.s.cXcptRecursions >= 3)
5491 {
5492#ifdef DEBUG_bird
5493 AssertFailed();
5494#endif
5495 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5496 }
5497
5498 /*
5499 * Evaluate the sequence of recurring events.
5500 */
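   /* Possible outcomes: deliver the current exception as-is, escalate to #DF
      (per the contributory/page-fault rules), initiate a triple-fault shutdown,
      or flag a CPU-hang condition. */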
5501 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5502 NULL /* pXcptRaiseInfo */);
5503 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5504 { /* likely */ }
5505 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5506 {
5507 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5508 u8Vector = X86_XCPT_DF;
5509 uErr = 0;
5510 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5511 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5512 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_EXCEPTION_0 + X86_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5513 }
5514 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5515 {
5516 Log2(("iemRaiseXcptOrInt: raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5517 return iemInitiateCpuShutdown(pVCpu);
5518 }
5519 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5520 {
5521 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5522 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5523 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5524 return VERR_EM_GUEST_CPU_HANG;
5525 }
5526 else
5527 {
5528 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5529 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5530 return VERR_IEM_IPE_9;
5531 }
5532
5533 /*
5534 * The 'EXT' bit is set when an exception occurs during delivery of an external
5535 * event (such as an interrupt or earlier exception), see Intel spec. 6.13
5536 * "Error Code".
5537 *
5538 * For exceptions generated by software interrupts and the INTO and INT3 instructions,
5539 * the 'EXT' bit will not be set, see Intel Instruction reference for INT n.
5540 */
5541 /** @todo Would INT1/ICEBP raised \#DB set the 'EXT' bit or not? Testcase... */
5542 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT))
5543 && (fFlags & IEM_XCPT_FLAGS_ERR)
5544 && u8Vector != X86_XCPT_PF
5545 && u8Vector != X86_XCPT_DF)
5546 {
5547 uErr |= X86_TRAP_ERR_EXTERNAL;
5548 }
5549 }
5550
5551 pVCpu->iem.s.cXcptRecursions++;
5552 pVCpu->iem.s.uCurXcpt = u8Vector;
5553 pVCpu->iem.s.fCurXcpt = fFlags;
5554 pVCpu->iem.s.uCurXcptErr = uErr;
5555 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5556
5557 /*
5558 * Extensive logging.
5559 */
5560#if defined(LOG_ENABLED) && defined(IN_RING3)
5561 if (LogIs3Enabled())
5562 {
5563 PVM pVM = pVCpu->CTX_SUFF(pVM);
5564 char szRegs[4096];
5565 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5566 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5567 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5568 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5569 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5570 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5571 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5572 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5573 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5574 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5575 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5576 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5577 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5578 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5579 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5580 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5581 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5582 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5583 " efer=%016VR{efer}\n"
5584 " pat=%016VR{pat}\n"
5585 " sf_mask=%016VR{sf_mask}\n"
5586 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5587 " lstar=%016VR{lstar}\n"
5588 " star=%016VR{star} cstar=%016VR{cstar}\n"
5589 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5590 );
5591
5592 char szInstr[256];
5593 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5594 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5595 szInstr, sizeof(szInstr), NULL);
5596 Log3(("%s%s\n", szRegs, szInstr));
5597 }
5598#endif /* LOG_ENABLED */
5599
5600 /*
5601 * Call the mode specific worker function.
5602 */
5603 VBOXSTRICTRC rcStrict;
5604 if (!(pCtx->cr0 & X86_CR0_PE))
5605 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5606 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5607 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5608 else
5609 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5610
5611 /* Flush the prefetch buffer. */
5612#ifdef IEM_WITH_CODE_TLB
5613 pVCpu->iem.s.pbInstrBuf = NULL;
5614#else
5615 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5616#endif
5617
5618 /*
5619 * Unwind.
5620 */
5621 pVCpu->iem.s.cXcptRecursions--;
5622 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5623 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5624 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u\n",
5625 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl));
5626 return rcStrict;
5627}
5628
5629#ifdef IEM_WITH_SETJMP
5630/**
5631 * See iemRaiseXcptOrInt. Will not return.
5632 */
5633IEM_STATIC DECL_NO_RETURN(void)
5634iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5635 uint8_t cbInstr,
5636 uint8_t u8Vector,
5637 uint32_t fFlags,
5638 uint16_t uErr,
5639 uint64_t uCr2)
5640{
5641 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5642 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5643}
5644#endif
5645
5646
5647/** \#DE - 00. */
5648DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5649{
5650 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5651}
5652
5653
5654/** \#DB - 01.
5655 * @note This automatically clears DR7.GD. */
5656DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5657{
5658 /** @todo set/clear RF. */
5659 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5660 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5661}
5662
5663
5664/** \#BR - 05. */
5665DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5666{
5667 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5668}
5669
5670
5671/** \#UD - 06. */
5672DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5673{
5674 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5675}
5676
5677
5678/** \#NM - 07. */
5679DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5680{
5681 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5682}
5683
5684
5685/** \#TS(err) - 0a. */
5686DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5687{
5688 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5689}
5690
5691
5692/** \#TS(tr) - 0a. */
5693DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5694{
5695 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5696 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5697}
5698
5699
5700/** \#TS(0) - 0a. */
5701DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5702{
5703 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5704 0, 0);
5705}
5706
5707
5708/** \#TS(err) - 0a. */
5709DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5710{
5711 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5712 uSel & X86_SEL_MASK_OFF_RPL, 0);
5713}
5714
5715
5716/** \#NP(err) - 0b. */
5717DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5718{
5719 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5720}
5721
5722
5723/** \#NP(sel) - 0b. */
5724DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5725{
5726 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5727 uSel & ~X86_SEL_RPL, 0);
5728}
5729
5730
5731/** \#SS(seg) - 0c. */
5732DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5733{
5734 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5735 uSel & ~X86_SEL_RPL, 0);
5736}
5737
5738
5739/** \#SS(err) - 0c. */
5740DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5741{
5742 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5743}
5744
5745
5746/** \#GP(n) - 0d. */
5747DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5748{
5749 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5750}
5751
5752
5753/** \#GP(0) - 0d. */
5754DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5755{
5756 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5757}
5758
5759#ifdef IEM_WITH_SETJMP
5760/** \#GP(0) - 0d. */
5761DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5762{
5763 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5764}
5765#endif
5766
5767
5768/** \#GP(sel) - 0d. */
5769DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5770{
5771 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5772 Sel & ~X86_SEL_RPL, 0);
5773}
5774
5775
5776/** \#GP(0) - 0d. */
5777DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5778{
5779 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5780}
5781
5782
5783/** \#GP(sel) - 0d. */
5784DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5785{
5786 NOREF(iSegReg); NOREF(fAccess);
5787 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5788 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5789}
5790
5791#ifdef IEM_WITH_SETJMP
5792/** \#GP(sel) - 0d, longjmp. */
5793DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5794{
5795 NOREF(iSegReg); NOREF(fAccess);
5796 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5797 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5798}
5799#endif
5800
5801/** \#GP(sel) - 0d. */
5802DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5803{
5804 NOREF(Sel);
5805 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5806}
5807
5808#ifdef IEM_WITH_SETJMP
5809/** \#GP(sel) - 0d, longjmp. */
5810DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5811{
5812 NOREF(Sel);
5813 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5814}
5815#endif
5816
5817
5818/** \#GP(sel) - 0d. */
5819DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5820{
5821 NOREF(iSegReg); NOREF(fAccess);
5822 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5823}
5824
5825#ifdef IEM_WITH_SETJMP
5826/** \#GP(sel) - 0d, longjmp. */
5827DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5828 uint32_t fAccess)
5829{
5830 NOREF(iSegReg); NOREF(fAccess);
5831 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5832}
5833#endif
5834
5835
5836/** \#PF(n) - 0e. */
5837DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5838{
5839 uint16_t uErr;
5840 switch (rc)
5841 {
5842 case VERR_PAGE_NOT_PRESENT:
5843 case VERR_PAGE_TABLE_NOT_PRESENT:
5844 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5845 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5846 uErr = 0;
5847 break;
5848
5849 default:
5850 AssertMsgFailed(("%Rrc\n", rc));
5851 /* fall thru */
5852 case VERR_ACCESS_DENIED:
5853 uErr = X86_TRAP_PF_P;
5854 break;
5855
5856 /** @todo reserved */
5857 }
5858
5859 if (pVCpu->iem.s.uCpl == 3)
5860 uErr |= X86_TRAP_PF_US;
5861
5862 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5863 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5864 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5865 uErr |= X86_TRAP_PF_ID;
5866
5867#if 0 /* This is so much non-sense, really. Why was it done like that? */
5868 /* Note! RW access callers reporting a WRITE protection fault, will clear
5869 the READ flag before calling. So, read-modify-write accesses (RW)
5870 can safely be reported as READ faults. */
5871 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5872 uErr |= X86_TRAP_PF_RW;
5873#else
5874 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5875 {
5876 if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))
5877 uErr |= X86_TRAP_PF_RW;
5878 }
5879#endif
5880
5881 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5882 uErr, GCPtrWhere);
5883}
5884
5885#ifdef IEM_WITH_SETJMP
5886/** \#PF(n) - 0e, longjmp. */
5887IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5888{
5889 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5890}
5891#endif
5892
5893
5894/** \#MF(0) - 10. */
5895DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5896{
5897 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5898}
5899
5900
5901/** \#AC(0) - 11. */
5902DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5903{
5904 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5905}
5906
5907
5908/**
5909 * Macro for calling iemCImplRaiseDivideError().
5910 *
5911 * This enables us to add/remove arguments and force different levels of
5912 * inlining as we wish.
5913 *
5914 * @return Strict VBox status code.
5915 */
5916#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5917IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5918{
5919 NOREF(cbInstr);
5920 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5921}
5922
5923
5924/**
5925 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5926 *
5927 * This enables us to add/remove arguments and force different levels of
5928 * inlining as we wish.
5929 *
5930 * @return Strict VBox status code.
5931 */
5932#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5933IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5934{
5935 NOREF(cbInstr);
5936 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5937}
5938
5939
5940/**
5941 * Macro for calling iemCImplRaiseInvalidOpcode().
5942 *
5943 * This enables us to add/remove arguments and force different levels of
5944 * inlining as we wish.
5945 *
5946 * @return Strict VBox status code.
5947 */
5948#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5949IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5950{
5951 NOREF(cbInstr);
5952 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5953}
5954
5955
5956/** @} */
5957
5958
5959/*
5960 *
5961 * Helper routines.
5962 * Helper routines.
5963 * Helper routines.
5964 *
5965 */
5966
5967/**
5968 * Recalculates the effective operand size.
5969 *
5970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5971 */
5972IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5973{
5974 switch (pVCpu->iem.s.enmCpuMode)
5975 {
5976 case IEMMODE_16BIT:
5977 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5978 break;
5979 case IEMMODE_32BIT:
5980 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5981 break;
5982 case IEMMODE_64BIT:
5983 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5984 {
5985 case 0:
5986 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5987 break;
5988 case IEM_OP_PRF_SIZE_OP:
5989 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5990 break;
5991 case IEM_OP_PRF_SIZE_REX_W:
5992 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5993 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5994 break;
5995 }
5996 break;
5997 default:
5998 AssertFailed();
5999 }
6000}
6001
6002
6003/**
6004 * Sets the default operand size to 64-bit and recalculates the effective
6005 * operand size.
6006 *
6007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6008 */
6009IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6010{
6011 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6012 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6013 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6014 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6015 else
6016 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6017}
6018
6019
6020/*
6021 *
6022 * Common opcode decoders.
6023 * Common opcode decoders.
6024 * Common opcode decoders.
6025 *
6026 */
6027//#include <iprt/mem.h>
6028
6029/**
6030 * Used to add extra details about a stub case.
6031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6032 */
6033IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6034{
6035#if defined(LOG_ENABLED) && defined(IN_RING3)
6036 PVM pVM = pVCpu->CTX_SUFF(pVM);
6037 char szRegs[4096];
6038 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6039 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6040 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6041 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6042 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6043 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6044 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6045 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6046 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6047 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6048 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6049 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6050 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6051 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6052 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6053 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6054 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6055 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6056 " efer=%016VR{efer}\n"
6057 " pat=%016VR{pat}\n"
6058 " sf_mask=%016VR{sf_mask}\n"
6059 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6060 " lstar=%016VR{lstar}\n"
6061 " star=%016VR{star} cstar=%016VR{cstar}\n"
6062 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6063 );
6064
6065 char szInstr[256];
6066 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6067 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6068 szInstr, sizeof(szInstr), NULL);
6069
6070 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6071#else
6072 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs, IEM_GET_CTX(pVCpu)->rip);
6073#endif
6074}
6075
6076/**
6077 * Complains about a stub.
6078 *
6079 * Providing two versions of this macro, one for daily use and one for use when
6080 * working on IEM.
6081 */
6082#if 0
6083# define IEMOP_BITCH_ABOUT_STUB() \
6084 do { \
6085 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6086 iemOpStubMsg2(pVCpu); \
6087 RTAssertPanic(); \
6088 } while (0)
6089#else
6090# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6091#endif
6092
6093/** Stubs an opcode. */
6094#define FNIEMOP_STUB(a_Name) \
6095 FNIEMOP_DEF(a_Name) \
6096 { \
6097 RT_NOREF_PV(pVCpu); \
6098 IEMOP_BITCH_ABOUT_STUB(); \
6099 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6100 } \
6101 typedef int ignore_semicolon
6102
6103/** Stubs an opcode. */
6104#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6105 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6106 { \
6107 RT_NOREF_PV(pVCpu); \
6108 RT_NOREF_PV(a_Name0); \
6109 IEMOP_BITCH_ABOUT_STUB(); \
6110 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6111 } \
6112 typedef int ignore_semicolon
6113
6114/** Stubs an opcode which currently should raise \#UD. */
6115#define FNIEMOP_UD_STUB(a_Name) \
6116 FNIEMOP_DEF(a_Name) \
6117 { \
6118 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6119 return IEMOP_RAISE_INVALID_OPCODE(); \
6120 } \
6121 typedef int ignore_semicolon
6122
6123/** Stubs an opcode which currently should raise \#UD. */
6124#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6125 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6126 { \
6127 RT_NOREF_PV(pVCpu); \
6128 RT_NOREF_PV(a_Name0); \
6129 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6130 return IEMOP_RAISE_INVALID_OPCODE(); \
6131 } \
6132 typedef int ignore_semicolon
6133
6134
6135
6136/** @name Register Access.
6137 * @{
6138 */
6139
6140/**
6141 * Gets a reference (pointer) to the specified hidden segment register.
6142 *
6143 * @returns Hidden register reference.
6144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6145 * @param iSegReg The segment register.
6146 */
6147IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6148{
6149 Assert(iSegReg < X86_SREG_COUNT);
6150 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6151 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6152
6153#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6154 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6155 { /* likely */ }
6156 else
6157 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6158#else
6159 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6160#endif
6161 return pSReg;
6162}
6163
6164
6165/**
6166 * Ensures that the given hidden segment register is up to date.
6167 *
6168 * @returns Hidden register reference.
6169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6170 * @param pSReg The segment register.
6171 */
6172IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6173{
6174#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6175 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6176 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6177#else
6178 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6179 NOREF(pVCpu);
6180#endif
6181 return pSReg;
6182}
6183
6184
6185/**
6186 * Gets a reference (pointer) to the specified segment register (the selector
6187 * value).
6188 *
6189 * @returns Pointer to the selector variable.
6190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6191 * @param iSegReg The segment register.
6192 */
6193DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6194{
6195 Assert(iSegReg < X86_SREG_COUNT);
6196 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6197 return &pCtx->aSRegs[iSegReg].Sel;
6198}
6199
6200
6201/**
6202 * Fetches the selector value of a segment register.
6203 *
6204 * @returns The selector value.
6205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6206 * @param iSegReg The segment register.
6207 */
6208DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6209{
6210 Assert(iSegReg < X86_SREG_COUNT);
6211 return IEM_GET_CTX(pVCpu)->aSRegs[iSegReg].Sel;
6212}
6213
6214
6215/**
6216 * Gets a reference (pointer) to the specified general purpose register.
6217 *
6218 * @returns Register reference.
6219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6220 * @param iReg The general purpose register.
6221 */
6222DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6223{
6224 Assert(iReg < 16);
6225 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6226 return &pCtx->aGRegs[iReg];
6227}
6228
6229
6230/**
6231 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6232 *
6233 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6234 *
6235 * @returns Register reference.
6236 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6237 * @param iReg The register.
6238 */
6239DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6240{
6241 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6242 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6243 {
6244 Assert(iReg < 16);
6245 return &pCtx->aGRegs[iReg].u8;
6246 }
6247 /* high 8-bit register. */
6248 Assert(iReg < 8);
6249 return &pCtx->aGRegs[iReg & 3].bHi;
6250}
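
#if 0
/* Illustrative sketch, not part of IEM: a rough picture of the 8-bit register encoding the
   function above implements.  Without any REX prefix, indices 4-7 select the legacy high
   bytes AH/CH/DH/BH (i.e. bHi of registers 0-3); with a REX prefix they select SPL/BPL/SIL/DIL
   instead.  The helper name and the name tables are made up for illustration only. */
static const char *iemExampleGReg8Name(uint8_t iReg, bool fHasRexPrefix)
{
    static const char * const s_apszLow[16] =
    {
        "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
        "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b"
    };
    static const char * const s_apszHigh[4] = { "ah", "ch", "dh", "bh" };
    if (iReg < 4 || fHasRexPrefix)
        return s_apszLow[iReg];
    return s_apszHigh[iReg & 3];   /* same '& 3' trick as iemGRegRefU8 uses for bHi */
}
#endif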
6251
6252
6253/**
6254 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6255 *
6256 * @returns Register reference.
6257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6258 * @param iReg The register.
6259 */
6260DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6261{
6262 Assert(iReg < 16);
6263 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6264 return &pCtx->aGRegs[iReg].u16;
6265}
6266
6267
6268/**
6269 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6270 *
6271 * @returns Register reference.
6272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6273 * @param iReg The register.
6274 */
6275DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6276{
6277 Assert(iReg < 16);
6278 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6279 return &pCtx->aGRegs[iReg].u32;
6280}
6281
6282
6283/**
6284 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6285 *
6286 * @returns Register reference.
6287 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6288 * @param iReg The register.
6289 */
6290DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6291{
6292 Assert(iReg < 16);
6293 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6294 return &pCtx->aGRegs[iReg].u64;
6295}
6296
6297
6298/**
6299 * Fetches the value of an 8-bit general purpose register.
6300 *
6301 * @returns The register value.
6302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6303 * @param iReg The register.
6304 */
6305DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6306{
6307 return *iemGRegRefU8(pVCpu, iReg);
6308}
6309
6310
6311/**
6312 * Fetches the value of a 16-bit general purpose register.
6313 *
6314 * @returns The register value.
6315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6316 * @param iReg The register.
6317 */
6318DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6319{
6320 Assert(iReg < 16);
6321 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6322}
6323
6324
6325/**
6326 * Fetches the value of a 32-bit general purpose register.
6327 *
6328 * @returns The register value.
6329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6330 * @param iReg The register.
6331 */
6332DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6333{
6334 Assert(iReg < 16);
6335 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6336}
6337
6338
6339/**
6340 * Fetches the value of a 64-bit general purpose register.
6341 *
6342 * @returns The register value.
6343 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6344 * @param iReg The register.
6345 */
6346DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6347{
6348 Assert(iReg < 16);
6349 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6350}
6351
6352
6353/**
6354 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6355 *
6356 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6357 * segment limit.
6358 *
6359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6360 * @param offNextInstr The offset of the next instruction.
6361 */
6362IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6363{
6364 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6365 switch (pVCpu->iem.s.enmEffOpSize)
6366 {
6367 case IEMMODE_16BIT:
6368 {
6369 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6370 if ( uNewIp > pCtx->cs.u32Limit
6371 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6372 return iemRaiseGeneralProtectionFault0(pVCpu);
6373 pCtx->rip = uNewIp;
6374 break;
6375 }
6376
6377 case IEMMODE_32BIT:
6378 {
6379 Assert(pCtx->rip <= UINT32_MAX);
6380 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6381
6382 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6383 if (uNewEip > pCtx->cs.u32Limit)
6384 return iemRaiseGeneralProtectionFault0(pVCpu);
6385 pCtx->rip = uNewEip;
6386 break;
6387 }
6388
6389 case IEMMODE_64BIT:
6390 {
6391 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6392
6393 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6394 if (!IEM_IS_CANONICAL(uNewRip))
6395 return iemRaiseGeneralProtectionFault0(pVCpu);
6396 pCtx->rip = uNewRip;
6397 break;
6398 }
6399
6400 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6401 }
6402
6403 pCtx->eflags.Bits.u1RF = 0;
6404
6405#ifndef IEM_WITH_CODE_TLB
6406 /* Flush the prefetch buffer. */
6407 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6408#endif
6409
6410 return VINF_SUCCESS;
6411}
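
#if 0
/* Illustrative sketch, not part of IEM: the 16-bit case above wraps naturally because uNewIp
   is a uint16_t, e.g. a short forward jump from IP 0xfff0 lands back at the start of the
   segment (subject to the CS limit check).  The helper name and the values are made up. */
static uint16_t iemExampleIp16Wrap(void)
{
    uint16_t const uIp     = UINT16_C(0xfff0);  /* hypothetical current IP */
    int8_t   const offRel  = 0x1e;              /* hypothetical rel8 operand */
    uint8_t  const cbInstr = 2;                 /* hypothetical instruction length */
    return (uint16_t)(uIp + offRel + cbInstr);  /* = 0x0010 */
}
#endif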
6412
6413
6414/**
6415 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6416 *
6417 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6418 * segment limit.
6419 *
6420 * @returns Strict VBox status code.
6421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6422 * @param offNextInstr The offset of the next instruction.
6423 */
6424IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6425{
6426 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6427 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6428
6429 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6430 if ( uNewIp > pCtx->cs.u32Limit
6431 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6432 return iemRaiseGeneralProtectionFault0(pVCpu);
6433 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6434 pCtx->rip = uNewIp;
6435 pCtx->eflags.Bits.u1RF = 0;
6436
6437#ifndef IEM_WITH_CODE_TLB
6438 /* Flush the prefetch buffer. */
6439 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6440#endif
6441
6442 return VINF_SUCCESS;
6443}
6444
6445
6446/**
6447 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6448 *
6449 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6450 * segment limit.
6451 *
6452 * @returns Strict VBox status code.
6453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6454 * @param offNextInstr The offset of the next instruction.
6455 */
6456IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6457{
6458 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6459 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6460
6461 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6462 {
6463 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6464
6465 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6466 if (uNewEip > pCtx->cs.u32Limit)
6467 return iemRaiseGeneralProtectionFault0(pVCpu);
6468 pCtx->rip = uNewEip;
6469 }
6470 else
6471 {
6472 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6473
6474 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6475 if (!IEM_IS_CANONICAL(uNewRip))
6476 return iemRaiseGeneralProtectionFault0(pVCpu);
6477 pCtx->rip = uNewRip;
6478 }
6479 pCtx->eflags.Bits.u1RF = 0;
6480
6481#ifndef IEM_WITH_CODE_TLB
6482 /* Flush the prefetch buffer. */
6483 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6484#endif
6485
6486 return VINF_SUCCESS;
6487}
6488
6489
6490/**
6491 * Performs a near jump to the specified address.
6492 *
6493 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6494 * segment limit.
6495 *
6496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6497 * @param uNewRip The new RIP value.
6498 */
6499IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6500{
6501 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6502 switch (pVCpu->iem.s.enmEffOpSize)
6503 {
6504 case IEMMODE_16BIT:
6505 {
6506 Assert(uNewRip <= UINT16_MAX);
6507 if ( uNewRip > pCtx->cs.u32Limit
6508 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6509 return iemRaiseGeneralProtectionFault0(pVCpu);
6510 /** @todo Test 16-bit jump in 64-bit mode. */
6511 pCtx->rip = uNewRip;
6512 break;
6513 }
6514
6515 case IEMMODE_32BIT:
6516 {
6517 Assert(uNewRip <= UINT32_MAX);
6518 Assert(pCtx->rip <= UINT32_MAX);
6519 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6520
6521 if (uNewRip > pCtx->cs.u32Limit)
6522 return iemRaiseGeneralProtectionFault0(pVCpu);
6523 pCtx->rip = uNewRip;
6524 break;
6525 }
6526
6527 case IEMMODE_64BIT:
6528 {
6529 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6530
6531 if (!IEM_IS_CANONICAL(uNewRip))
6532 return iemRaiseGeneralProtectionFault0(pVCpu);
6533 pCtx->rip = uNewRip;
6534 break;
6535 }
6536
6537 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6538 }
6539
6540 pCtx->eflags.Bits.u1RF = 0;
6541
6542#ifndef IEM_WITH_CODE_TLB
6543 /* Flush the prefetch buffer. */
6544 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6545#endif
6546
6547 return VINF_SUCCESS;
6548}
6549
6550
6551/**
6552 * Gets the address of the top of the stack.
6553 *
6554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6555 * @param pCtx The CPU context which SP/ESP/RSP should be
6556 * read.
6557 */
6558DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6559{
6560 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6561 return pCtx->rsp;
6562 if (pCtx->ss.Attr.n.u1DefBig)
6563 return pCtx->esp;
6564 return pCtx->sp;
6565}
6566
6567
6568/**
6569 * Updates the RIP/EIP/IP to point to the next instruction.
6570 *
6571 * This function leaves the EFLAGS.RF flag alone.
6572 *
6573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6574 * @param cbInstr The number of bytes to add.
6575 */
6576IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6577{
6578 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6579 switch (pVCpu->iem.s.enmCpuMode)
6580 {
6581 case IEMMODE_16BIT:
6582 Assert(pCtx->rip <= UINT16_MAX);
6583 pCtx->eip += cbInstr;
6584 pCtx->eip &= UINT32_C(0xffff);
6585 break;
6586
6587 case IEMMODE_32BIT:
6588 pCtx->eip += cbInstr;
6589 Assert(pCtx->rip <= UINT32_MAX);
6590 break;
6591
6592 case IEMMODE_64BIT:
6593 pCtx->rip += cbInstr;
6594 break;
6595 default: AssertFailed();
6596 }
6597}
6598
6599
6600#if 0
6601/**
6602 * Updates the RIP/EIP/IP to point to the next instruction.
6603 *
6604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6605 */
6606IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6607{
6608 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6609}
6610#endif
6611
6612
6613
6614/**
6615 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6616 *
6617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6618 * @param cbInstr The number of bytes to add.
6619 */
6620IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6621{
6622 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6623
6624 pCtx->eflags.Bits.u1RF = 0;
6625
6626 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6627#if ARCH_BITS >= 64
6628 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffff), UINT64_C(0xffffffff), UINT64_MAX };
6629 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6630 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6631#else
6632 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6633 pCtx->rip += cbInstr;
6634 else
6635 {
6636 static uint32_t const s_aEipMasks[] = { UINT32_C(0xffff), UINT32_MAX };
6637 pCtx->eip = (pCtx->eip + cbInstr) & s_aEipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6638 }
6639#endif
6640}
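
#if 0
/* Illustrative sketch, not part of IEM: the mask table above relies on IEMMODE_16BIT/32BIT/64BIT
   being 0/1/2 (see the AssertCompile) and is equivalent to this branchy version.  The helper
   name is made up for illustration only. */
static uint64_t iemExampleAdvanceRip(uint64_t uRip, uint8_t cbInstr, IEMMODE enmCpuMode)
{
    uint64_t const fMask = enmCpuMode == IEMMODE_16BIT ? UINT64_C(0xffff)
                         : enmCpuMode == IEMMODE_32BIT ? UINT64_C(0xffffffff)
                         :                               UINT64_MAX;
    return (uRip + cbInstr) & fMask;
}
#endif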
6641
6642
6643/**
6644 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6645 *
6646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6647 */
6648IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6649{
6650 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6651}
6652
6653
6654/**
6655 * Adds to the stack pointer.
6656 *
6657 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6658 * @param pCtx The CPU context which SP/ESP/RSP should be
6659 * updated.
6660 * @param cbToAdd The number of bytes to add (8-bit!).
6661 */
6662DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6663{
6664 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6665 pCtx->rsp += cbToAdd;
6666 else if (pCtx->ss.Attr.n.u1DefBig)
6667 pCtx->esp += cbToAdd;
6668 else
6669 pCtx->sp += cbToAdd;
6670}
6671
6672
6673/**
6674 * Subtracts from the stack pointer.
6675 *
6676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6677 * @param pCtx The CPU context which SP/ESP/RSP should be
6678 * updated.
6679 * @param cbToSub The number of bytes to subtract (8-bit!).
6680 */
6681DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6682{
6683 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6684 pCtx->rsp -= cbToSub;
6685 else if (pCtx->ss.Attr.n.u1DefBig)
6686 pCtx->esp -= cbToSub;
6687 else
6688 pCtx->sp -= cbToSub;
6689}
6690
6691
6692/**
6693 * Adds to the temporary stack pointer.
6694 *
6695 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6696 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6697 * @param cbToAdd The number of bytes to add (16-bit).
6698 * @param pCtx Where to get the current stack mode.
6699 */
6700DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6701{
6702 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6703 pTmpRsp->u += cbToAdd;
6704 else if (pCtx->ss.Attr.n.u1DefBig)
6705 pTmpRsp->DWords.dw0 += cbToAdd;
6706 else
6707 pTmpRsp->Words.w0 += cbToAdd;
6708}
6709
6710
6711/**
6712 * Subtracts from the temporary stack pointer.
6713 *
6714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6715 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6716 * @param cbToSub The number of bytes to subtract.
6717 * @param pCtx Where to get the current stack mode.
6718 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6719 * expecting that.
6720 */
6721DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6722{
6723 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6724 pTmpRsp->u -= cbToSub;
6725 else if (pCtx->ss.Attr.n.u1DefBig)
6726 pTmpRsp->DWords.dw0 -= cbToSub;
6727 else
6728 pTmpRsp->Words.w0 -= cbToSub;
6729}
6730
6731
6732/**
6733 * Calculates the effective stack address for a push of the specified size as
6734 * well as the new RSP value (upper bits may be masked).
6735 *
6736 * @returns Effective stack address for the push.
6737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6738 * @param pCtx Where to get the current stack mode.
6739 * @param cbItem The size of the stack item to push.
6740 * @param puNewRsp Where to return the new RSP value.
6741 */
6742DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6743{
6744 RTUINT64U uTmpRsp;
6745 RTGCPTR GCPtrTop;
6746 uTmpRsp.u = pCtx->rsp;
6747
6748 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6749 GCPtrTop = uTmpRsp.u -= cbItem;
6750 else if (pCtx->ss.Attr.n.u1DefBig)
6751 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6752 else
6753 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6754 *puNewRsp = uTmpRsp.u;
6755 return GCPtrTop;
6756}
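
#if 0
/* Illustrative sketch, not part of IEM: because only the relevant sub-field of the RTUINT64U
   is modified, a 16-bit stack (SS.D=0 outside long mode) wraps within the low word of RSP
   while the upper bits stay untouched.  The helper name and values are made up. */
static void iemExamplePush16BitWrap(void)
{
    RTUINT64U uTmpRsp;
    uTmpRsp.u = UINT64_C(0x0000000000000002);   /* hypothetical SP=0x0002 */
    uTmpRsp.Words.w0 -= 4;                      /* push of a dword-sized item */
    Assert(uTmpRsp.u == UINT64_C(0x000000000000fffe));
}
#endif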
6757
6758
6759/**
6760 * Gets the current stack pointer and calculates the value after a pop of the
6761 * specified size.
6762 *
6763 * @returns Current stack pointer.
6764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6765 * @param pCtx Where to get the current stack mode.
6766 * @param cbItem The size of the stack item to pop.
6767 * @param puNewRsp Where to return the new RSP value.
6768 */
6769DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6770{
6771 RTUINT64U uTmpRsp;
6772 RTGCPTR GCPtrTop;
6773 uTmpRsp.u = pCtx->rsp;
6774
6775 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6776 {
6777 GCPtrTop = uTmpRsp.u;
6778 uTmpRsp.u += cbItem;
6779 }
6780 else if (pCtx->ss.Attr.n.u1DefBig)
6781 {
6782 GCPtrTop = uTmpRsp.DWords.dw0;
6783 uTmpRsp.DWords.dw0 += cbItem;
6784 }
6785 else
6786 {
6787 GCPtrTop = uTmpRsp.Words.w0;
6788 uTmpRsp.Words.w0 += cbItem;
6789 }
6790 *puNewRsp = uTmpRsp.u;
6791 return GCPtrTop;
6792}
6793
6794
6795/**
6796 * Calculates the effective stack address for a push of the specified size as
6797 * well as the new temporary RSP value (upper bits may be masked).
6798 *
6799 * @returns Effective stack address for the push.
6800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6801 * @param pCtx Where to get the current stack mode.
6802 * @param pTmpRsp The temporary stack pointer. This is updated.
6803 * @param cbItem The size of the stack item to push.
6804 */
6805DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6806{
6807 RTGCPTR GCPtrTop;
6808
6809 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6810 GCPtrTop = pTmpRsp->u -= cbItem;
6811 else if (pCtx->ss.Attr.n.u1DefBig)
6812 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6813 else
6814 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6815 return GCPtrTop;
6816}
6817
6818
6819/**
6820 * Gets the effective stack address for a pop of the specified size and
6821 * calculates and updates the temporary RSP.
6822 *
6823 * @returns Current stack pointer.
6824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6825 * @param pCtx Where to get the current stack mode.
6826 * @param pTmpRsp The temporary stack pointer. This is updated.
6827 * @param cbItem The size of the stack item to pop.
6828 */
6829DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6830{
6831 RTGCPTR GCPtrTop;
6832 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6833 {
6834 GCPtrTop = pTmpRsp->u;
6835 pTmpRsp->u += cbItem;
6836 }
6837 else if (pCtx->ss.Attr.n.u1DefBig)
6838 {
6839 GCPtrTop = pTmpRsp->DWords.dw0;
6840 pTmpRsp->DWords.dw0 += cbItem;
6841 }
6842 else
6843 {
6844 GCPtrTop = pTmpRsp->Words.w0;
6845 pTmpRsp->Words.w0 += cbItem;
6846 }
6847 return GCPtrTop;
6848}
6849
6850/** @} */
6851
6852
6853/** @name FPU access and helpers.
6854 *
6855 * @{
6856 */
6857
6858
6859/**
6860 * Hook for preparing to use the host FPU.
6861 *
6862 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6863 *
6864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6865 */
6866DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6867{
6868#ifdef IN_RING3
6869 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6870#else
6871 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6872#endif
6873}
6874
6875
6876/**
6877 * Hook for preparing to use the host FPU for SSE.
6878 *
6879 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6880 *
6881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6882 */
6883DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6884{
6885 iemFpuPrepareUsage(pVCpu);
6886}
6887
6888
6889/**
6890 * Hook for actualizing the guest FPU state before the interpreter reads it.
6891 *
6892 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6893 *
6894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6895 */
6896DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6897{
6898#ifdef IN_RING3
6899 NOREF(pVCpu);
6900#else
6901 CPUMRZFpuStateActualizeForRead(pVCpu);
6902#endif
6903}
6904
6905
6906/**
6907 * Hook for actualizing the guest FPU state before the interpreter changes it.
6908 *
6909 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6910 *
6911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6912 */
6913DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6914{
6915#ifdef IN_RING3
6916 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6917#else
6918 CPUMRZFpuStateActualizeForChange(pVCpu);
6919#endif
6920}
6921
6922
6923/**
6924 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6925 * only.
6926 *
6927 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6928 *
6929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6930 */
6931DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6932{
6933#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6934 NOREF(pVCpu);
6935#else
6936 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6937#endif
6938}
6939
6940
6941/**
6942 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6943 * read+write.
6944 *
6945 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6946 *
6947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6948 */
6949DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6950{
6951#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6952 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6953#else
6954 CPUMRZFpuStateActualizeForChange(pVCpu);
6955#endif
6956}
6957
6958
6959/**
6960 * Stores a QNaN value into a FPU register.
6961 *
6962 * @param pReg Pointer to the register.
6963 */
6964DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6965{
6966 pReg->au32[0] = UINT32_C(0x00000000);
6967 pReg->au32[1] = UINT32_C(0xc0000000);
6968 pReg->au16[4] = UINT16_C(0xffff);
6969}
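
#if 0
/* Illustrative sketch, not part of IEM: the bit pattern stored above is the x87 "real
   indefinite" QNaN - sign set, exponent all ones, mantissa 0xC000000000000000 (integer bit
   plus quiet bit).  The helper name is made up for illustration only. */
static void iemExampleCheckIndefiniteQNan(void)
{
    RTFLOAT80U r80;
    iemFpuStoreQNan(&r80);
    Assert(r80.au16[4]       == UINT16_C(0xffff));                  /* sign + exponent */
    Assert(r80.s.uExponent   == 0x7fff);                            /* all ones => special */
    Assert(r80.s.u64Mantissa == UINT64_C(0xC000000000000000));      /* J bit + quiet bit */
}
#endif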
6970
6971
6972/**
6973 * Updates the FOP, FPU.CS and FPUIP registers.
6974 *
6975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6976 * @param pCtx The CPU context.
6977 * @param pFpuCtx The FPU context.
6978 */
6979DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6980{
6981 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6982 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6983 /** @todo x87.CS and FPUIP need to be kept separately. */
6984 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
6985 {
6986 /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are handled
6987 *        in real mode, based on the fnsave and fnstenv images. */
6988 pFpuCtx->CS = 0;
6989 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
6990 }
6991 else
6992 {
6993 pFpuCtx->CS = pCtx->cs.Sel;
6994 pFpuCtx->FPUIP = pCtx->rip;
6995 }
6996}
6997
6998
6999/**
7000 * Updates the x87.DS and FPUDP registers.
7001 *
7002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7003 * @param pCtx The CPU context.
7004 * @param pFpuCtx The FPU context.
7005 * @param iEffSeg The effective segment register.
7006 * @param GCPtrEff The effective address relative to @a iEffSeg.
7007 */
7008DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7009{
7010 RTSEL sel;
7011 switch (iEffSeg)
7012 {
7013 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7014 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7015 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7016 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7017 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7018 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7019 default:
7020 AssertMsgFailed(("%d\n", iEffSeg));
7021 sel = pCtx->ds.Sel;
7022 }
7023 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7024 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7025 {
7026 pFpuCtx->DS = 0;
7027 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7028 }
7029 else
7030 {
7031 pFpuCtx->DS = sel;
7032 pFpuCtx->FPUDP = GCPtrEff;
7033 }
7034}
7035
7036
7037/**
7038 * Rotates the stack registers in the push direction.
7039 *
7040 * @param pFpuCtx The FPU context.
7041 * @remarks This is a complete waste of time, but fxsave stores the registers in
7042 * stack order.
7043 */
7044DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7045{
7046 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7047 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7048 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7049 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7050 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7051 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7052 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7053 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7054 pFpuCtx->aRegs[0].r80 = r80Tmp;
7055}
7056
7057
7058/**
7059 * Rotates the stack registers in the pop direction.
7060 *
7061 * @param pFpuCtx The FPU context.
7062 * @remarks This is a complete waste of time, but fxsave stores the registers in
7063 * stack order.
7064 */
7065DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7066{
7067 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7068 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7069 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7070 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7071 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7072 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7073 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7074 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7075 pFpuCtx->aRegs[7].r80 = r80Tmp;
7076}
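
#if 0
/* Illustrative sketch, not part of IEM: the two rotations are exact inverses, which is what
   keeps aRegs[0] lined up with ST(0) after a push or pop.  The helper name is made up. */
static void iemExampleRotateRoundTrip(PX86FXSTATE pFpuCtx)
{
    RTFLOAT80U const r80St0 = pFpuCtx->aRegs[0].r80;
    iemFpuRotateStackPush(pFpuCtx);
    iemFpuRotateStackPop(pFpuCtx);
    Assert(pFpuCtx->aRegs[0].r80.au32[0] == r80St0.au32[0]);
    Assert(pFpuCtx->aRegs[0].r80.au32[1] == r80St0.au32[1]);
    Assert(pFpuCtx->aRegs[0].r80.au16[4] == r80St0.au16[4]);
}
#endif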
7077
7078
7079/**
7080 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7081 * exception prevents it.
7082 *
7083 * @param pResult The FPU operation result to push.
7084 * @param pFpuCtx The FPU context.
7085 */
7086IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7087{
7088 /* Update FSW and bail if there are pending exceptions afterwards. */
7089 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7090 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7091 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7092 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7093 {
7094 pFpuCtx->FSW = fFsw;
7095 return;
7096 }
7097
7098 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7099 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7100 {
7101 /* All is fine, push the actual value. */
7102 pFpuCtx->FTW |= RT_BIT(iNewTop);
7103 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7104 }
7105 else if (pFpuCtx->FCW & X86_FCW_IM)
7106 {
7107 /* Masked stack overflow, push QNaN. */
7108 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7109 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7110 }
7111 else
7112 {
7113 /* Raise stack overflow, don't push anything. */
7114 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7115 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7116 return;
7117 }
7118
7119 fFsw &= ~X86_FSW_TOP_MASK;
7120 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7121 pFpuCtx->FSW = fFsw;
7122
7123 iemFpuRotateStackPush(pFpuCtx);
7124}
7125
7126
7127/**
7128 * Stores a result in a FPU register and updates the FSW and FTW.
7129 *
7130 * @param pFpuCtx The FPU context.
7131 * @param pResult The result to store.
7132 * @param iStReg Which FPU register to store it in.
7133 */
7134IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7135{
7136 Assert(iStReg < 8);
7137 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7138 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7139 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7140 pFpuCtx->FTW |= RT_BIT(iReg);
7141 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7142}
7143
7144
7145/**
7146 * Only updates the FPU status word (FSW) with the result of the current
7147 * instruction.
7148 *
7149 * @param pFpuCtx The FPU context.
7150 * @param u16FSW The FSW output of the current instruction.
7151 */
7152IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7153{
7154 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7155 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7156}
7157
7158
7159/**
7160 * Pops one item off the FPU stack if no pending exception prevents it.
7161 *
7162 * @param pFpuCtx The FPU context.
7163 */
7164IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7165{
7166 /* Check pending exceptions. */
7167 uint16_t uFSW = pFpuCtx->FSW;
7168 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7169 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7170 return;
7171
7172 /* TOP--. */
7173 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7174 uFSW &= ~X86_FSW_TOP_MASK;
7175 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7176 pFpuCtx->FSW = uFSW;
7177
7178 /* Mark the previous ST0 as empty. */
7179 iOldTop >>= X86_FSW_TOP_SHIFT;
7180 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7181
7182 /* Rotate the registers. */
7183 iemFpuRotateStackPop(pFpuCtx);
7184}
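
#if 0
/* Illustrative sketch, not part of IEM: TOP is a 3-bit field, so the additions used above and
   in the push paths are simply modulo-8 arithmetic - adding 7 decrements TOP (push) while
   adding 9 (or 1) increments it (pop).  The helper name is made up for illustration only. */
static void iemExampleFswTopArithmetic(void)
{
    uint16_t const uTop = 0;                            /* hypothetical current TOP */
    Assert(((uTop + 7) & X86_FSW_TOP_SMASK) == 7);      /* push: 0 - 1 == 7 (mod 8) */
    Assert(((uTop + 9) & X86_FSW_TOP_SMASK) == 1);      /* pop:  0 + 1 == 1 (mod 8) */
}
#endif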
7185
7186
7187/**
7188 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7189 *
7190 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7191 * @param pResult The FPU operation result to push.
7192 */
7193IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7194{
7195 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7196 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7197 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7198 iemFpuMaybePushResult(pResult, pFpuCtx);
7199}
7200
7201
7202/**
7203 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7204 * and sets FPUDP and FPUDS.
7205 *
7206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7207 * @param pResult The FPU operation result to push.
7208 * @param iEffSeg The effective segment register.
7209 * @param GCPtrEff The effective address relative to @a iEffSeg.
7210 */
7211IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7212{
7213 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7214 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7215 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7216 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7217 iemFpuMaybePushResult(pResult, pFpuCtx);
7218}
7219
7220
7221/**
7222 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7223 * unless a pending exception prevents it.
7224 *
7225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7226 * @param pResult The FPU operation result to store and push.
7227 */
7228IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7229{
7230 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7231 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7232 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7233
7234 /* Update FSW and bail if there are pending exceptions afterwards. */
7235 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7236 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7237 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7238 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7239 {
7240 pFpuCtx->FSW = fFsw;
7241 return;
7242 }
7243
7244 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7245 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7246 {
7247 /* All is fine, push the actual value. */
7248 pFpuCtx->FTW |= RT_BIT(iNewTop);
7249 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7250 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7251 }
7252 else if (pFpuCtx->FCW & X86_FCW_IM)
7253 {
7254 /* Masked stack overflow, push QNaN. */
7255 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7256 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7257 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7258 }
7259 else
7260 {
7261 /* Raise stack overflow, don't push anything. */
7262 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7263 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7264 return;
7265 }
7266
7267 fFsw &= ~X86_FSW_TOP_MASK;
7268 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7269 pFpuCtx->FSW = fFsw;
7270
7271 iemFpuRotateStackPush(pFpuCtx);
7272}
7273
7274
7275/**
7276 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7277 * FOP.
7278 *
7279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7280 * @param pResult The result to store.
7281 * @param iStReg Which FPU register to store it in.
7282 */
7283IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7284{
7285 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7286 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7287 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7288 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7289}
7290
7291
7292/**
7293 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7294 * FOP, and then pops the stack.
7295 *
7296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7297 * @param pResult The result to store.
7298 * @param iStReg Which FPU register to store it in.
7299 */
7300IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7301{
7302 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7303 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7304 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7305 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7306 iemFpuMaybePopOne(pFpuCtx);
7307}
7308
7309
7310/**
7311 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7312 * FPUDP, and FPUDS.
7313 *
7314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7315 * @param pResult The result to store.
7316 * @param iStReg Which FPU register to store it in.
7317 * @param iEffSeg The effective memory operand selector register.
7318 * @param GCPtrEff The effective memory operand offset.
7319 */
7320IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7321 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7322{
7323 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7324 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7325 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7326 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7327 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7328}
7329
7330
7331/**
7332 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7333 * FPUDP, and FPUDS, and then pops the stack.
7334 *
7335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7336 * @param pResult The result to store.
7337 * @param iStReg Which FPU register to store it in.
7338 * @param iEffSeg The effective memory operand selector register.
7339 * @param GCPtrEff The effective memory operand offset.
7340 */
7341IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7342 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7343{
7344 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7345 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7346 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7347 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7348 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7349 iemFpuMaybePopOne(pFpuCtx);
7350}
7351
7352
7353/**
7354 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7355 *
7356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7357 */
7358IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7359{
7360 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7361 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7362 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7363}
7364
7365
7366/**
7367 * Marks the specified stack register as free (for FFREE).
7368 *
7369 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7370 * @param iStReg The register to free.
7371 */
7372IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7373{
7374 Assert(iStReg < 8);
7375 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7376 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7377 pFpuCtx->FTW &= ~RT_BIT(iReg);
7378}
7379
7380
7381/**
7382 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7383 *
7384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7385 */
7386IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7387{
7388 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7389 uint16_t uFsw = pFpuCtx->FSW;
7390 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7391 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7392 uFsw &= ~X86_FSW_TOP_MASK;
7393 uFsw |= uTop;
7394 pFpuCtx->FSW = uFsw;
7395}
7396
7397
7398/**
7399 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7400 *
7401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7402 */
7403IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7404{
7405 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7406 uint16_t uFsw = pFpuCtx->FSW;
7407 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7408 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7409 uFsw &= ~X86_FSW_TOP_MASK;
7410 uFsw |= uTop;
7411 pFpuCtx->FSW = uFsw;
7412}
7413
7414
7415/**
7416 * Updates the FSW, FOP, FPUIP, and FPUCS.
7417 *
7418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7419 * @param u16FSW The FSW from the current instruction.
7420 */
7421IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7422{
7423 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7424 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7425 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7426 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7427}
7428
7429
7430/**
7431 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7432 *
7433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7434 * @param u16FSW The FSW from the current instruction.
7435 */
7436IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7437{
7438 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7439 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7440 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7441 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7442 iemFpuMaybePopOne(pFpuCtx);
7443}
7444
7445
7446/**
7447 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7448 *
7449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7450 * @param u16FSW The FSW from the current instruction.
7451 * @param iEffSeg The effective memory operand selector register.
7452 * @param GCPtrEff The effective memory operand offset.
7453 */
7454IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7455{
7456 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7457 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7458 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7459 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7460 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7461}
7462
7463
7464/**
7465 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7466 *
7467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7468 * @param u16FSW The FSW from the current instruction.
7469 */
7470IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7471{
7472 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7473 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7474 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7475 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7476 iemFpuMaybePopOne(pFpuCtx);
7477 iemFpuMaybePopOne(pFpuCtx);
7478}
7479
7480
7481/**
7482 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7483 *
7484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7485 * @param u16FSW The FSW from the current instruction.
7486 * @param iEffSeg The effective memory operand selector register.
7487 * @param GCPtrEff The effective memory operand offset.
7488 */
7489IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7490{
7491 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7492 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7493 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7494 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7495 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7496 iemFpuMaybePopOne(pFpuCtx);
7497}
7498
7499
7500/**
7501 * Worker routine for raising an FPU stack underflow exception.
7502 *
7503 * @param pFpuCtx The FPU context.
7504 * @param iStReg The stack register being accessed.
7505 */
7506IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7507{
7508 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7509 if (pFpuCtx->FCW & X86_FCW_IM)
7510 {
7511 /* Masked underflow. */
7512 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7513 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7514 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7515 if (iStReg != UINT8_MAX)
7516 {
7517 pFpuCtx->FTW |= RT_BIT(iReg);
7518 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7519 }
7520 }
7521 else
7522 {
7523 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7524 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7525 }
7526}
7527
7528
7529/**
7530 * Raises a FPU stack underflow exception.
7531 *
7532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7533 * @param iStReg The destination register that should be loaded
7534 * with QNaN if \#IS is not masked. Specify
7535 * UINT8_MAX if none (like for fcom).
7536 */
7537DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7538{
7539 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7540 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7541 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7542 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7543}
7544
7545
7546DECL_NO_INLINE(IEM_STATIC, void)
7547iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7548{
7549 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7550 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7551 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7552 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7553 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7554}
7555
7556
7557DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7558{
7559 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7560 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7561 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7562 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7563 iemFpuMaybePopOne(pFpuCtx);
7564}
7565
7566
7567DECL_NO_INLINE(IEM_STATIC, void)
7568iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7569{
7570 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7571 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7572 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7573 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7574 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7575 iemFpuMaybePopOne(pFpuCtx);
7576}
7577
7578
7579DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7580{
7581 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7582 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7583 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7584 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7585 iemFpuMaybePopOne(pFpuCtx);
7586 iemFpuMaybePopOne(pFpuCtx);
7587}
7588
7589
7590DECL_NO_INLINE(IEM_STATIC, void)
7591iemFpuStackPushUnderflow(PVMCPU pVCpu)
7592{
7593 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7594 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7595 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7596
7597 if (pFpuCtx->FCW & X86_FCW_IM)
7598 {
7599 /* Masked underflow - Push QNaN. */
7600 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7601 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7602 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7603 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7604 pFpuCtx->FTW |= RT_BIT(iNewTop);
7605 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7606 iemFpuRotateStackPush(pFpuCtx);
7607 }
7608 else
7609 {
7610 /* Exception pending - don't change TOP or the register stack. */
7611 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7612 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7613 }
7614}
7615
7616
7617DECL_NO_INLINE(IEM_STATIC, void)
7618iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7619{
7620 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7621 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7622 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7623
7624 if (pFpuCtx->FCW & X86_FCW_IM)
7625 {
7626 /* Masked underflow - Push QNaN. */
7627 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7628 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7629 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7630 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7631 pFpuCtx->FTW |= RT_BIT(iNewTop);
7632 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7633 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7634 iemFpuRotateStackPush(pFpuCtx);
7635 }
7636 else
7637 {
7638 /* Exception pending - don't change TOP or the register stack. */
7639 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7640 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7641 }
7642}
7643
7644
7645/**
7646 * Worker routine for raising an FPU stack overflow exception on a push.
7647 *
7648 * @param pFpuCtx The FPU context.
7649 */
7650IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7651{
7652 if (pFpuCtx->FCW & X86_FCW_IM)
7653 {
7654 /* Masked overflow. */
7655 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7656 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7657 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7658 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7659 pFpuCtx->FTW |= RT_BIT(iNewTop);
7660 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7661 iemFpuRotateStackPush(pFpuCtx);
7662 }
7663 else
7664 {
7665 /* Exception pending - don't change TOP or the register stack. */
7666 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7667 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7668 }
7669}
7670
7671
7672/**
7673 * Raises a FPU stack overflow exception on a push.
7674 *
7675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7676 */
7677DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7678{
7679 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7680 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7681 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7682 iemFpuStackPushOverflowOnly(pFpuCtx);
7683}
7684
7685
7686/**
7687 * Raises a FPU stack overflow exception on a push with a memory operand.
7688 *
7689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7690 * @param iEffSeg The effective memory operand selector register.
7691 * @param GCPtrEff The effective memory operand offset.
7692 */
7693DECL_NO_INLINE(IEM_STATIC, void)
7694iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7695{
7696 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7697 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7698 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7699 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7700 iemFpuStackPushOverflowOnly(pFpuCtx);
7701}
7702
7703
7704IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7705{
7706 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7707 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7708 if (pFpuCtx->FTW & RT_BIT(iReg))
7709 return VINF_SUCCESS;
7710 return VERR_NOT_FOUND;
7711}
7712
7713
7714IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7715{
7716 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7717 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7718 if (pFpuCtx->FTW & RT_BIT(iReg))
7719 {
7720 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7721 return VINF_SUCCESS;
7722 }
7723 return VERR_NOT_FOUND;
7724}
7725
7726
7727IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7728 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7729{
7730 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7731 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7732 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7733 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7734 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7735 {
7736 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7737 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7738 return VINF_SUCCESS;
7739 }
7740 return VERR_NOT_FOUND;
7741}
7742
7743
7744IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7745{
7746 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7747 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7748 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7749 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7750 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7751 {
7752 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7753 return VINF_SUCCESS;
7754 }
7755 return VERR_NOT_FOUND;
7756}
7757
7758
7759/**
7760 * Updates the FPU exception status after FCW is changed.
7761 *
7762 * @param pFpuCtx The FPU context.
7763 */
7764IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7765{
7766 uint16_t u16Fsw = pFpuCtx->FSW;
7767 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7768 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7769 else
7770 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7771 pFpuCtx->FSW = u16Fsw;
7772}
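
#if 0
/* Illustrative sketch, not part of IEM: ES and B are summary bits - they are set exactly when
   some exception flagged in FSW is unmasked in FCW.  The helper name and the FCW/FSW values
   below are made up for illustration only. */
static void iemExampleRecalcSummaryBits(void)
{
    X86FXSTATE FpuCtx;
    RT_ZERO(FpuCtx);
    FpuCtx.FSW = X86_FSW_ZE;                    /* divide-by-zero flagged ... */
    FpuCtx.FCW = X86_FCW_IM | X86_FCW_DM;       /* ... with ZM clear, i.e. unmasked */
    iemFpuRecalcExceptionStatus(&FpuCtx);
    Assert(FpuCtx.FSW & X86_FSW_ES);
    Assert(FpuCtx.FSW & X86_FSW_B);
}
#endif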
7773
7774
7775/**
7776 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7777 *
7778 * @returns The full FTW.
7779 * @param pFpuCtx The FPU context.
7780 */
7781IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7782{
7783 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7784 uint16_t u16Ftw = 0;
7785 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7786 for (unsigned iSt = 0; iSt < 8; iSt++)
7787 {
7788 unsigned const iReg = (iSt + iTop) & 7;
7789 if (!(u8Ftw & RT_BIT(iReg)))
7790 u16Ftw |= 3 << (iReg * 2); /* empty */
7791 else
7792 {
7793 uint16_t uTag;
7794 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7795 if (pr80Reg->s.uExponent == 0x7fff)
7796 uTag = 2; /* Exponent is all 1's => Special. */
7797 else if (pr80Reg->s.uExponent == 0x0000)
7798 {
7799 if (pr80Reg->s.u64Mantissa == 0x0000)
7800 uTag = 1; /* All bits are zero => Zero. */
7801 else
7802 uTag = 2; /* Must be special. */
7803 }
7804 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7805 uTag = 0; /* Valid. */
7806 else
7807 uTag = 2; /* Must be special. */
7808
7809 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7810 }
7811 }
7812
7813 return u16Ftw;
7814}
7815
7816
7817/**
7818 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7819 *
7820 * @returns The compressed FTW.
7821 * @param u16FullFtw The full FTW to convert.
7822 */
7823IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7824{
7825 uint8_t u8Ftw = 0;
7826 for (unsigned i = 0; i < 8; i++)
7827 {
7828 if ((u16FullFtw & 3) != 3 /*empty*/)
7829 u8Ftw |= RT_BIT(i);
7830 u16FullFtw >>= 2;
7831 }
7832
7833 return u8Ftw;
7834}
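
#if 0
/* Illustrative sketch, not part of IEM: compressing the full tag word computed by
   iemFpuCalcFullFtw gives back the abridged one-bit-per-register FTW kept in the fxsave
   image, since the only information lost is the valid/zero/special distinction.  The helper
   name is made up for illustration only. */
static void iemExampleFtwRoundTrip(PCX86FXSTATE pFpuCtx)
{
    uint16_t const u16FullFtw = iemFpuCalcFullFtw(pFpuCtx);
    Assert(iemFpuCompressFtw(u16FullFtw) == (uint8_t)pFpuCtx->FTW);
}
#endif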
7835
7836/** @} */
7837
7838
7839/** @name Memory access.
7840 *
7841 * @{
7842 */
7843
7844
7845/**
7846 * Updates the IEMCPU::cbWritten counter if applicable.
7847 *
7848 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7849 * @param fAccess The access being accounted for.
7850 * @param cbMem The access size.
7851 */
7852DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7853{
7854 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7855 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7856 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7857}
7858
7859
7860/**
7861 * Checks if the given segment can be written to, raising the appropriate
7862 * exception if not.
7863 *
7864 * @returns VBox strict status code.
7865 *
7866 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7867 * @param pHid Pointer to the hidden register.
7868 * @param iSegReg The register number.
7869 * @param pu64BaseAddr Where to return the base address to use for the
7870 * segment. (In 64-bit code it may differ from the
7871 * base in the hidden segment.)
7872 */
7873IEM_STATIC VBOXSTRICTRC
7874iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7875{
7876 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7877 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7878 else
7879 {
7880 if (!pHid->Attr.n.u1Present)
7881 {
7882 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7883 AssertRelease(uSel == 0);
7884 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7885 return iemRaiseGeneralProtectionFault0(pVCpu);
7886 }
7887
7888 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7889 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7890 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7891 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7892 *pu64BaseAddr = pHid->u64Base;
7893 }
7894 return VINF_SUCCESS;
7895}
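
#if 0
/* Illustrative sketch, not part of IEM: in 64-bit mode only FS and GS contribute a segment
   base, the other selectors are treated as flat; the 'iSegReg < X86_SREG_FS' test above
   relies on FS and GS being the last two entries in the X86_SREG_XXX ordering.  The helper
   name is made up for illustration only. */
static uint64_t iemExampleSegBaseIn64BitMode(uint8_t iSegReg, uint64_t u64HiddenBase)
{
    return iSegReg == X86_SREG_FS || iSegReg == X86_SREG_GS ? u64HiddenBase : 0;
}
#endif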
7896
7897
7898/**
7899 * Checks if the given segment can be read from, raising the appropriate
7900 * exception if not.
7901 *
7902 * @returns VBox strict status code.
7903 *
7904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7905 * @param pHid Pointer to the hidden register.
7906 * @param iSegReg The register number.
7907 * @param pu64BaseAddr Where to return the base address to use for the
7908 * segment. (In 64-bit code it may differ from the
7909 * base in the hidden segment.)
7910 */
7911IEM_STATIC VBOXSTRICTRC
7912iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7913{
7914 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7915 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7916 else
7917 {
7918 if (!pHid->Attr.n.u1Present)
7919 {
7920 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7921 AssertRelease(uSel == 0);
7922 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7923 return iemRaiseGeneralProtectionFault0(pVCpu);
7924 }
7925
7926 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7927 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7928 *pu64BaseAddr = pHid->u64Base;
7929 }
7930 return VINF_SUCCESS;
7931}
7932
7933
7934/**
7935 * Applies the segment limit, base and attributes.
7936 *
7937 * This may raise a \#GP or \#SS.
7938 *
7939 * @returns VBox strict status code.
7940 *
7941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7942 * @param fAccess The kind of access which is being performed.
7943 * @param iSegReg The index of the segment register to apply.
7944 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7945 * TSS, ++).
7946 * @param cbMem The access size.
7947 * @param pGCPtrMem Pointer to the guest memory address to apply
7948 * segmentation to. Input and output parameter.
7949 */
7950IEM_STATIC VBOXSTRICTRC
7951iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7952{
7953 if (iSegReg == UINT8_MAX)
7954 return VINF_SUCCESS;
7955
7956 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7957 switch (pVCpu->iem.s.enmCpuMode)
7958 {
7959 case IEMMODE_16BIT:
7960 case IEMMODE_32BIT:
7961 {
7962 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7963 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7964
7965 if ( pSel->Attr.n.u1Present
7966 && !pSel->Attr.n.u1Unusable)
7967 {
7968 Assert(pSel->Attr.n.u1DescType);
7969 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7970 {
7971 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7972 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7973 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7974
7975 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7976 {
7977 /** @todo CPL check. */
7978 }
7979
7980 /*
7981 * There are two kinds of data selectors, normal and expand down.
7982 */
7983 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
7984 {
7985 if ( GCPtrFirst32 > pSel->u32Limit
7986 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
7987 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7988 }
7989 else
7990 {
7991 /*
7992 * The upper boundary is defined by the B bit, not the G bit!
7993 */
7994 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
7995 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
7996 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
7997 }
7998 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
7999 }
8000 else
8001 {
8002
8003 /*
 8004 * A code selector can usually be used to read through; writing is
 8005 * only permitted in real and V8086 mode.
8006 */
8007 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8008 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8009 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8010 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8011 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8012
8013 if ( GCPtrFirst32 > pSel->u32Limit
8014 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8015 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8016
8017 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8018 {
8019 /** @todo CPL check. */
8020 }
8021
8022 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8023 }
8024 }
8025 else
8026 return iemRaiseGeneralProtectionFault0(pVCpu);
8027 return VINF_SUCCESS;
8028 }
8029
8030 case IEMMODE_64BIT:
8031 {
8032 RTGCPTR GCPtrMem = *pGCPtrMem;
8033 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8034 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8035
8036 Assert(cbMem >= 1);
8037 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8038 return VINF_SUCCESS;
8039 return iemRaiseGeneralProtectionFault0(pVCpu);
8040 }
8041
8042 default:
8043 AssertFailedReturn(VERR_IEM_IPE_7);
8044 }
8045}
8046
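/*
 * Illustrative example of the expand-down handling above (assumed values, not
 * from the code): for an expand-down data segment with u32Limit=0x0fff and the
 * B bit set (u1DefBig=1), the valid offsets are 0x1000 thru 0xffffffff.  A
 * 4-byte access starting at 0x0ffe therefore fails the bounds check and goes
 * through iemRaiseSelectorBounds (#GP or #SS), while the same access at 0x1000
 * passes and only then gets the segment base added.
 */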
8047
8048/**
 8049 * Translates a virtual address to a physical address and checks if we
8050 * can access the page as specified.
8051 *
8052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8053 * @param GCPtrMem The virtual address.
8054 * @param fAccess The intended access.
8055 * @param pGCPhysMem Where to return the physical address.
8056 */
8057IEM_STATIC VBOXSTRICTRC
8058iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8059{
8060 /** @todo Need a different PGM interface here. We're currently using
8061 * generic / REM interfaces. this won't cut it for R0 & RC. */
8062 RTGCPHYS GCPhys;
8063 uint64_t fFlags;
8064 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8065 if (RT_FAILURE(rc))
8066 {
8067 /** @todo Check unassigned memory in unpaged mode. */
8068 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8069 *pGCPhysMem = NIL_RTGCPHYS;
8070 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8071 }
8072
 8073 /* If the page is writable, user accessible and does not have the no-exec
 8074 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
8075 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8076 {
8077 /* Write to read only memory? */
8078 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8079 && !(fFlags & X86_PTE_RW)
8080 && ( (pVCpu->iem.s.uCpl == 3
8081 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8082 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8083 {
8084 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8085 *pGCPhysMem = NIL_RTGCPHYS;
8086 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8087 }
8088
8089 /* Kernel memory accessed by userland? */
8090 if ( !(fFlags & X86_PTE_US)
8091 && pVCpu->iem.s.uCpl == 3
8092 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8093 {
8094 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8095 *pGCPhysMem = NIL_RTGCPHYS;
8096 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8097 }
8098
8099 /* Executing non-executable memory? */
8100 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8101 && (fFlags & X86_PTE_PAE_NX)
8102 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8103 {
8104 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8105 *pGCPhysMem = NIL_RTGCPHYS;
8106 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8107 VERR_ACCESS_DENIED);
8108 }
8109 }
8110
8111 /*
8112 * Set the dirty / access flags.
 8113 * ASSUMES this is set when the address is translated rather than on commit...
8114 */
8115 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8116 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8117 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8118 {
8119 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8120 AssertRC(rc2);
8121 }
8122
8123 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8124 *pGCPhysMem = GCPhys;
8125 return VINF_SUCCESS;
8126}
8127
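/*
 * Sketch of the outcomes above (illustrative): a PTE with RW and US set and
 * the NX bit clear skips the detailed checks entirely; only the A bit (plus D
 * for writes) may still need setting via PGMGstModifyPage.  A CPL=3 write to a
 * present page with RW clear, or any CPL=3 access to a supervisor-only page,
 * ends up in iemRaisePageFault with VERR_ACCESS_DENIED instead.
 */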
8128
8129
8130/**
8131 * Maps a physical page.
8132 *
8133 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8135 * @param GCPhysMem The physical address.
8136 * @param fAccess The intended access.
8137 * @param ppvMem Where to return the mapping address.
8138 * @param pLock The PGM lock.
8139 */
8140IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8141{
8142#ifdef IEM_VERIFICATION_MODE_FULL
8143 /* Force the alternative path so we can ignore writes. */
8144 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)
8145 {
8146 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8147 {
8148 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,
8149 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8150 if (RT_FAILURE(rc2))
8151 pVCpu->iem.s.fProblematicMemory = true;
8152 }
8153 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8154 }
8155#endif
8156#ifdef IEM_LOG_MEMORY_WRITES
8157 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8158 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8159#endif
8160#ifdef IEM_VERIFICATION_MODE_MINIMAL
8161 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8162#endif
8163
8164 /** @todo This API may require some improving later. A private deal with PGM
 8165 * regarding locking and unlocking needs to be struck. A couple of TLBs
8166 * living in PGM, but with publicly accessible inlined access methods
8167 * could perhaps be an even better solution. */
8168 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8169 GCPhysMem,
8170 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8171 pVCpu->iem.s.fBypassHandlers,
8172 ppvMem,
8173 pLock);
8174 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8175 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8176
8177#ifdef IEM_VERIFICATION_MODE_FULL
8178 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8179 pVCpu->iem.s.fProblematicMemory = true;
8180#endif
8181 return rc;
8182}
8183
8184
8185/**
 8186 * Unmaps a page previously mapped by iemMemPageMap.
8187 *
8188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8189 * @param GCPhysMem The physical address.
8190 * @param fAccess The intended access.
8191 * @param pvMem What iemMemPageMap returned.
8192 * @param pLock The PGM lock.
8193 */
8194DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8195{
8196 NOREF(pVCpu);
8197 NOREF(GCPhysMem);
8198 NOREF(fAccess);
8199 NOREF(pvMem);
8200 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8201}
8202
8203
8204/**
8205 * Looks up a memory mapping entry.
8206 *
8207 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8209 * @param pvMem The memory address.
 8210 * @param fAccess The kind of access the mapping was made with.
8211 */
8212DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8213{
8214 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8215 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8216 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8217 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8218 return 0;
8219 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8220 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8221 return 1;
8222 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8223 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8224 return 2;
8225 return VERR_NOT_FOUND;
8226}
8227
8228
8229/**
 8230 * Finds a free memmap entry when the iNextMapping hint doesn't work.
8231 *
8232 * @returns Memory mapping index, 1024 on failure.
8233 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8234 */
8235IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8236{
8237 /*
8238 * The easy case.
8239 */
8240 if (pVCpu->iem.s.cActiveMappings == 0)
8241 {
8242 pVCpu->iem.s.iNextMapping = 1;
8243 return 0;
8244 }
8245
8246 /* There should be enough mappings for all instructions. */
8247 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8248
8249 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8250 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8251 return i;
8252
8253 AssertFailedReturn(1024);
8254}
8255
8256
8257/**
8258 * Commits a bounce buffer that needs writing back and unmaps it.
8259 *
8260 * @returns Strict VBox status code.
8261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8262 * @param iMemMap The index of the buffer to commit.
 8263 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8264 * Always false in ring-3, obviously.
8265 */
8266IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8267{
8268 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8269 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8270#ifdef IN_RING3
8271 Assert(!fPostponeFail);
8272 RT_NOREF_PV(fPostponeFail);
8273#endif
8274
8275 /*
8276 * Do the writing.
8277 */
8278#ifndef IEM_VERIFICATION_MODE_MINIMAL
8279 PVM pVM = pVCpu->CTX_SUFF(pVM);
8280 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned
8281 && !IEM_VERIFICATION_ENABLED(pVCpu))
8282 {
8283 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8284 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8285 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8286 if (!pVCpu->iem.s.fBypassHandlers)
8287 {
8288 /*
8289 * Carefully and efficiently dealing with access handler return
 8290 * codes makes this a little bloated.
8291 */
8292 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8293 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8294 pbBuf,
8295 cbFirst,
8296 PGMACCESSORIGIN_IEM);
8297 if (rcStrict == VINF_SUCCESS)
8298 {
8299 if (cbSecond)
8300 {
8301 rcStrict = PGMPhysWrite(pVM,
8302 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8303 pbBuf + cbFirst,
8304 cbSecond,
8305 PGMACCESSORIGIN_IEM);
8306 if (rcStrict == VINF_SUCCESS)
8307 { /* nothing */ }
8308 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8309 {
8310 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8311 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8312 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8313 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8314 }
8315# ifndef IN_RING3
8316 else if (fPostponeFail)
8317 {
8318 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8319 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8320 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8321 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8322 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8323 return iemSetPassUpStatus(pVCpu, rcStrict);
8324 }
8325# endif
8326 else
8327 {
8328 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8329 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8330 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8331 return rcStrict;
8332 }
8333 }
8334 }
8335 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8336 {
8337 if (!cbSecond)
8338 {
8339 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8340 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8341 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8342 }
8343 else
8344 {
8345 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8346 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8347 pbBuf + cbFirst,
8348 cbSecond,
8349 PGMACCESSORIGIN_IEM);
8350 if (rcStrict2 == VINF_SUCCESS)
8351 {
8352 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8353 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8354 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8355 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8356 }
8357 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8358 {
8359 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8360 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8361 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8362 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8363 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8364 }
8365# ifndef IN_RING3
8366 else if (fPostponeFail)
8367 {
8368 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8369 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8370 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8371 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8372 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8373 return iemSetPassUpStatus(pVCpu, rcStrict);
8374 }
8375# endif
8376 else
8377 {
8378 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8379 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8380 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8381 return rcStrict2;
8382 }
8383 }
8384 }
8385# ifndef IN_RING3
8386 else if (fPostponeFail)
8387 {
8388 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8389 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8390 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8391 if (!cbSecond)
8392 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8393 else
8394 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8395 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8396 return iemSetPassUpStatus(pVCpu, rcStrict);
8397 }
8398# endif
8399 else
8400 {
8401 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8402 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8403 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8404 return rcStrict;
8405 }
8406 }
8407 else
8408 {
8409 /*
8410 * No access handlers, much simpler.
8411 */
8412 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8413 if (RT_SUCCESS(rc))
8414 {
8415 if (cbSecond)
8416 {
8417 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8418 if (RT_SUCCESS(rc))
8419 { /* likely */ }
8420 else
8421 {
8422 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8423 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8425 return rc;
8426 }
8427 }
8428 }
8429 else
8430 {
8431 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8432 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8433 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8434 return rc;
8435 }
8436 }
8437 }
8438#endif
8439
8440#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8441 /*
8442 * Record the write(s).
8443 */
8444 if (!pVCpu->iem.s.fNoRem)
8445 {
8446 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8447 if (pEvtRec)
8448 {
8449 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8450 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst;
8451 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8452 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst);
8453 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab));
8454 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8455 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8456 }
8457 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8458 {
8459 pEvtRec = iemVerifyAllocRecord(pVCpu);
8460 if (pEvtRec)
8461 {
8462 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
8463 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond;
8464 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8465 memcpy(pEvtRec->u.RamWrite.ab,
8466 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst],
8467 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond);
8468 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8469 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8470 }
8471 }
8472 }
8473#endif
8474#if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)
8475 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8476 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8477 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8478 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8479 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8480 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8481
8482 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8483 g_cbIemWrote = cbWrote;
8484 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8485#endif
8486
8487 /*
8488 * Free the mapping entry.
8489 */
8490 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8491 Assert(pVCpu->iem.s.cActiveMappings != 0);
8492 pVCpu->iem.s.cActiveMappings--;
8493 return VINF_SUCCESS;
8494}
8495
8496
8497/**
8498 * iemMemMap worker that deals with a request crossing pages.
8499 */
8500IEM_STATIC VBOXSTRICTRC
8501iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8502{
8503 /*
8504 * Do the address translations.
8505 */
8506 RTGCPHYS GCPhysFirst;
8507 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8508 if (rcStrict != VINF_SUCCESS)
8509 return rcStrict;
8510
8511 RTGCPHYS GCPhysSecond;
8512 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8513 fAccess, &GCPhysSecond);
8514 if (rcStrict != VINF_SUCCESS)
8515 return rcStrict;
8516 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8517
8518 PVM pVM = pVCpu->CTX_SUFF(pVM);
8519#ifdef IEM_VERIFICATION_MODE_FULL
8520 /*
8521 * Detect problematic memory when verifying so we can select
8522 * the right execution engine. (TLB: Redo this.)
8523 */
8524 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))
8525 {
8526 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8527 if (RT_SUCCESS(rc2))
8528 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);
8529 if (RT_FAILURE(rc2))
8530 pVCpu->iem.s.fProblematicMemory = true;
8531 }
8532#endif
8533
8534
8535 /*
8536 * Read in the current memory content if it's a read, execute or partial
8537 * write access.
8538 */
8539 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8540 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8541 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8542
8543 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8544 {
8545 if (!pVCpu->iem.s.fBypassHandlers)
8546 {
8547 /*
8548 * Must carefully deal with access handler status codes here,
8549 * makes the code a bit bloated.
8550 */
8551 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8552 if (rcStrict == VINF_SUCCESS)
8553 {
8554 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8555 if (rcStrict == VINF_SUCCESS)
8556 { /*likely */ }
8557 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8558 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8559 else
8560 {
 8561 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8562 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8563 return rcStrict;
8564 }
8565 }
8566 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8567 {
8568 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8569 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8570 {
8571 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8572 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8573 }
8574 else
8575 {
 8576 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
 8577 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8578 return rcStrict2;
8579 }
8580 }
8581 else
8582 {
 8583 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8584 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8585 return rcStrict;
8586 }
8587 }
8588 else
8589 {
8590 /*
 8591 * No informational status codes here, much more straightforward.
8592 */
8593 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8594 if (RT_SUCCESS(rc))
8595 {
8596 Assert(rc == VINF_SUCCESS);
8597 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8598 if (RT_SUCCESS(rc))
8599 Assert(rc == VINF_SUCCESS);
8600 else
8601 {
 8602 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8603 return rc;
8604 }
8605 }
8606 else
8607 {
 8608 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8609 return rc;
8610 }
8611 }
8612
8613#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8614 if ( !pVCpu->iem.s.fNoRem
8615 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8616 {
8617 /*
8618 * Record the reads.
8619 */
8620 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8621 if (pEvtRec)
8622 {
8623 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8624 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8625 pEvtRec->u.RamRead.cb = cbFirstPage;
8626 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8627 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8628 }
8629 pEvtRec = iemVerifyAllocRecord(pVCpu);
8630 if (pEvtRec)
8631 {
8632 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8633 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;
8634 pEvtRec->u.RamRead.cb = cbSecondPage;
8635 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8636 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8637 }
8638 }
8639#endif
8640 }
8641#ifdef VBOX_STRICT
8642 else
8643 memset(pbBuf, 0xcc, cbMem);
8644 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8645 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8646#endif
8647
8648 /*
8649 * Commit the bounce buffer entry.
8650 */
8651 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8652 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8653 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8654 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8655 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8656 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8657 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8658 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8659 pVCpu->iem.s.cActiveMappings++;
8660
8661 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8662 *ppvMem = pbBuf;
8663 return VINF_SUCCESS;
8664}
8665
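/*
 * Quick illustration of the split above (assumed values): a 4 byte access at
 * GCPtrFirst=0x10ffe with 4 KiB pages gives cbFirstPage = 0x1000 - 0xffe = 2
 * and cbSecondPage = 4 - 2 = 2, so the bounce buffer holds the last two bytes
 * of the first page followed by the first two bytes of the second page, and
 * the commit path later writes them back to GCPhysFirst and GCPhysSecond
 * respectively.
 */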
8666
8667/**
 8668 * iemMemMap worker that deals with iemMemPageMap failures.
8669 */
8670IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8671 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8672{
8673 /*
8674 * Filter out conditions we can handle and the ones which shouldn't happen.
8675 */
8676 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8677 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8678 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8679 {
8680 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8681 return rcMap;
8682 }
8683 pVCpu->iem.s.cPotentialExits++;
8684
8685 /*
8686 * Read in the current memory content if it's a read, execute or partial
8687 * write access.
8688 */
8689 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8690 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8691 {
8692 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8693 memset(pbBuf, 0xff, cbMem);
8694 else
8695 {
8696 int rc;
8697 if (!pVCpu->iem.s.fBypassHandlers)
8698 {
8699 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8700 if (rcStrict == VINF_SUCCESS)
8701 { /* nothing */ }
8702 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8703 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8704 else
8705 {
8706 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8707 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8708 return rcStrict;
8709 }
8710 }
8711 else
8712 {
8713 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8714 if (RT_SUCCESS(rc))
8715 { /* likely */ }
8716 else
8717 {
 8718 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n",
8719 GCPhysFirst, rc));
8720 return rc;
8721 }
8722 }
8723 }
8724
8725#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
8726 if ( !pVCpu->iem.s.fNoRem
8727 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )
8728 {
8729 /*
8730 * Record the read.
8731 */
8732 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
8733 if (pEvtRec)
8734 {
8735 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
8736 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;
8737 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;
8738 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
8739 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
8740 }
8741 }
8742#endif
8743 }
8744#ifdef VBOX_STRICT
8745 else
8746 memset(pbBuf, 0xcc, cbMem);
8747#endif
8748#ifdef VBOX_STRICT
8749 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8750 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8751#endif
8752
8753 /*
8754 * Commit the bounce buffer entry.
8755 */
8756 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8757 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8758 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8759 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8760 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8761 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8762 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8763 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8764 pVCpu->iem.s.cActiveMappings++;
8765
8766 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8767 *ppvMem = pbBuf;
8768 return VINF_SUCCESS;
8769}
8770
8771
8772
8773/**
8774 * Maps the specified guest memory for the given kind of access.
8775 *
8776 * This may be using bounce buffering of the memory if it's crossing a page
8777 * boundary or if there is an access handler installed for any of it. Because
8778 * of lock prefix guarantees, we're in for some extra clutter when this
8779 * happens.
8780 *
8781 * This may raise a \#GP, \#SS, \#PF or \#AC.
8782 *
8783 * @returns VBox strict status code.
8784 *
8785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8786 * @param ppvMem Where to return the pointer to the mapped
8787 * memory.
8788 * @param cbMem The number of bytes to map. This is usually 1,
8789 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8790 * string operations it can be up to a page.
8791 * @param iSegReg The index of the segment register to use for
8792 * this access. The base and limits are checked.
8793 * Use UINT8_MAX to indicate that no segmentation
8794 * is required (for IDT, GDT and LDT accesses).
8795 * @param GCPtrMem The address of the guest memory.
8796 * @param fAccess How the memory is being accessed. The
8797 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8798 * how to map the memory, while the
8799 * IEM_ACCESS_WHAT_XXX bit is used when raising
8800 * exceptions.
8801 */
8802IEM_STATIC VBOXSTRICTRC
8803iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8804{
8805 /*
8806 * Check the input and figure out which mapping entry to use.
8807 */
8808 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8809 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8810 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8811
8812 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8813 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8814 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8815 {
8816 iMemMap = iemMemMapFindFree(pVCpu);
8817 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8818 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8819 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8820 pVCpu->iem.s.aMemMappings[2].fAccess),
8821 VERR_IEM_IPE_9);
8822 }
8823
8824 /*
8825 * Map the memory, checking that we can actually access it. If something
8826 * slightly complicated happens, fall back on bounce buffering.
8827 */
8828 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8829 if (rcStrict != VINF_SUCCESS)
8830 return rcStrict;
8831
8832 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8833 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8834
8835 RTGCPHYS GCPhysFirst;
8836 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8837 if (rcStrict != VINF_SUCCESS)
8838 return rcStrict;
8839
8840 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8841 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8842 if (fAccess & IEM_ACCESS_TYPE_READ)
8843 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8844
8845 void *pvMem;
8846 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8847 if (rcStrict != VINF_SUCCESS)
8848 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8849
8850 /*
8851 * Fill in the mapping table entry.
8852 */
8853 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8854 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8855 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8856 pVCpu->iem.s.cActiveMappings++;
8857
8858 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8859 *ppvMem = pvMem;
8860 return VINF_SUCCESS;
8861}
8862
8863
8864/**
8865 * Commits the guest memory if bounce buffered and unmaps it.
8866 *
8867 * @returns Strict VBox status code.
8868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8869 * @param pvMem The mapping.
8870 * @param fAccess The kind of access.
8871 */
8872IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8873{
8874 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8875 AssertReturn(iMemMap >= 0, iMemMap);
8876
8877 /* If it's bounce buffered, we may need to write back the buffer. */
8878 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8879 {
8880 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8881 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8882 }
8883 /* Otherwise unlock it. */
8884 else
8885 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8886
8887 /* Free the entry. */
8888 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8889 Assert(pVCpu->iem.s.cActiveMappings != 0);
8890 pVCpu->iem.s.cActiveMappings--;
8891 return VINF_SUCCESS;
8892}
8893
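/*
 * Illustrative sketch (not compiled): the typical caller pattern pairs
 * iemMemMap with iemMemCommitAndUnmap.  The locals iSegReg, GCPtrMem and
 * u32Value are assumed to exist in the caller; this mirrors the data fetch
 * helpers below, just for a 32-bit write:
 *
 *      uint32_t *pu32Dst;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                        iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu32Dst = u32Value;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *      }
 *      return rcStrict;
 */
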
8894#ifdef IEM_WITH_SETJMP
8895
8896/**
8897 * Maps the specified guest memory for the given kind of access, longjmp on
8898 * error.
8899 *
8900 * This may be using bounce buffering of the memory if it's crossing a page
8901 * boundary or if there is an access handler installed for any of it. Because
8902 * of lock prefix guarantees, we're in for some extra clutter when this
8903 * happens.
8904 *
8905 * This may raise a \#GP, \#SS, \#PF or \#AC.
8906 *
8907 * @returns Pointer to the mapped memory.
8908 *
8909 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8910 * @param cbMem The number of bytes to map. This is usually 1,
8911 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8912 * string operations it can be up to a page.
8913 * @param iSegReg The index of the segment register to use for
8914 * this access. The base and limits are checked.
8915 * Use UINT8_MAX to indicate that no segmentation
8916 * is required (for IDT, GDT and LDT accesses).
8917 * @param GCPtrMem The address of the guest memory.
8918 * @param fAccess How the memory is being accessed. The
8919 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8920 * how to map the memory, while the
8921 * IEM_ACCESS_WHAT_XXX bit is used when raising
8922 * exceptions.
8923 */
8924IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8925{
8926 /*
8927 * Check the input and figure out which mapping entry to use.
8928 */
8929 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8930 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8931 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8932
8933 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8934 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8935 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8936 {
8937 iMemMap = iemMemMapFindFree(pVCpu);
8938 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8939 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8940 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8941 pVCpu->iem.s.aMemMappings[2].fAccess),
8942 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8943 }
8944
8945 /*
8946 * Map the memory, checking that we can actually access it. If something
8947 * slightly complicated happens, fall back on bounce buffering.
8948 */
8949 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8950 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8951 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8952
8953 /* Crossing a page boundary? */
8954 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8955 { /* No (likely). */ }
8956 else
8957 {
8958 void *pvMem;
8959 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8960 if (rcStrict == VINF_SUCCESS)
8961 return pvMem;
8962 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8963 }
8964
8965 RTGCPHYS GCPhysFirst;
8966 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8967 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8968 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8969
8970 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8971 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8972 if (fAccess & IEM_ACCESS_TYPE_READ)
8973 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8974
8975 void *pvMem;
8976 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8977 if (rcStrict == VINF_SUCCESS)
8978 { /* likely */ }
8979 else
8980 {
8981 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8982 if (rcStrict == VINF_SUCCESS)
8983 return pvMem;
8984 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8985 }
8986
8987 /*
8988 * Fill in the mapping table entry.
8989 */
8990 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8991 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8992 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8993 pVCpu->iem.s.cActiveMappings++;
8994
8995 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8996 return pvMem;
8997}
8998
8999
9000/**
9001 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9002 *
9003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9004 * @param pvMem The mapping.
9005 * @param fAccess The kind of access.
9006 */
9007IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9008{
9009 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9010 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9011
9012 /* If it's bounce buffered, we may need to write back the buffer. */
9013 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9014 {
9015 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9016 {
9017 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9018 if (rcStrict == VINF_SUCCESS)
9019 return;
9020 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9021 }
9022 }
9023 /* Otherwise unlock it. */
9024 else
9025 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9026
9027 /* Free the entry. */
9028 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9029 Assert(pVCpu->iem.s.cActiveMappings != 0);
9030 pVCpu->iem.s.cActiveMappings--;
9031}
9032
9033#endif
9034
9035#ifndef IN_RING3
9036/**
 9037 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
 9038 * buffer part shows trouble, the write is postponed to ring-3 (sets FF and stuff).
9039 *
9040 * Allows the instruction to be completed and retired, while the IEM user will
9041 * return to ring-3 immediately afterwards and do the postponed writes there.
9042 *
9043 * @returns VBox status code (no strict statuses). Caller must check
9044 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9045 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9046 * @param pvMem The mapping.
9047 * @param fAccess The kind of access.
9048 */
9049IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9050{
9051 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9052 AssertReturn(iMemMap >= 0, iMemMap);
9053
9054 /* If it's bounce buffered, we may need to write back the buffer. */
9055 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9056 {
9057 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9058 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9059 }
9060 /* Otherwise unlock it. */
9061 else
9062 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9063
9064 /* Free the entry. */
9065 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9066 Assert(pVCpu->iem.s.cActiveMappings != 0);
9067 pVCpu->iem.s.cActiveMappings--;
9068 return VINF_SUCCESS;
9069}
9070#endif
9071
9072
9073/**
 9074 * Rolls back mappings, releasing page locks and such.
9075 *
9076 * The caller shall only call this after checking cActiveMappings.
9077 *
9079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9080 */
9081IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9082{
9083 Assert(pVCpu->iem.s.cActiveMappings > 0);
9084
9085 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9086 while (iMemMap-- > 0)
9087 {
9088 uint32_t fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9089 if (fAccess != IEM_ACCESS_INVALID)
9090 {
9091 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9092 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9093 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9094 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9095 Assert(pVCpu->iem.s.cActiveMappings > 0);
9096 pVCpu->iem.s.cActiveMappings--;
9097 }
9098 }
9099}
9100
9101
9102/**
9103 * Fetches a data byte.
9104 *
9105 * @returns Strict VBox status code.
9106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9107 * @param pu8Dst Where to return the byte.
9108 * @param iSegReg The index of the segment register to use for
9109 * this access. The base and limits are checked.
9110 * @param GCPtrMem The address of the guest memory.
9111 */
9112IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9113{
9114 /* The lazy approach for now... */
9115 uint8_t const *pu8Src;
9116 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9117 if (rc == VINF_SUCCESS)
9118 {
9119 *pu8Dst = *pu8Src;
9120 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9121 }
9122 return rc;
9123}
9124
9125
9126#ifdef IEM_WITH_SETJMP
9127/**
9128 * Fetches a data byte, longjmp on error.
9129 *
9130 * @returns The byte.
9131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9132 * @param iSegReg The index of the segment register to use for
9133 * this access. The base and limits are checked.
9134 * @param GCPtrMem The address of the guest memory.
9135 */
9136DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9137{
9138 /* The lazy approach for now... */
9139 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9140 uint8_t const bRet = *pu8Src;
9141 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9142 return bRet;
9143}
9144#endif /* IEM_WITH_SETJMP */
9145
9146
9147/**
9148 * Fetches a data word.
9149 *
9150 * @returns Strict VBox status code.
9151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9152 * @param pu16Dst Where to return the word.
9153 * @param iSegReg The index of the segment register to use for
9154 * this access. The base and limits are checked.
9155 * @param GCPtrMem The address of the guest memory.
9156 */
9157IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9158{
9159 /* The lazy approach for now... */
9160 uint16_t const *pu16Src;
9161 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9162 if (rc == VINF_SUCCESS)
9163 {
9164 *pu16Dst = *pu16Src;
9165 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9166 }
9167 return rc;
9168}
9169
9170
9171#ifdef IEM_WITH_SETJMP
9172/**
9173 * Fetches a data word, longjmp on error.
9174 *
9175 * @returns The word
9176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9177 * @param iSegReg The index of the segment register to use for
9178 * this access. The base and limits are checked.
9179 * @param GCPtrMem The address of the guest memory.
9180 */
9181DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9182{
9183 /* The lazy approach for now... */
9184 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9185 uint16_t const u16Ret = *pu16Src;
9186 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9187 return u16Ret;
9188}
9189#endif
9190
9191
9192/**
9193 * Fetches a data dword.
9194 *
9195 * @returns Strict VBox status code.
9196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9197 * @param pu32Dst Where to return the dword.
9198 * @param iSegReg The index of the segment register to use for
9199 * this access. The base and limits are checked.
9200 * @param GCPtrMem The address of the guest memory.
9201 */
9202IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9203{
9204 /* The lazy approach for now... */
9205 uint32_t const *pu32Src;
9206 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9207 if (rc == VINF_SUCCESS)
9208 {
9209 *pu32Dst = *pu32Src;
9210 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9211 }
9212 return rc;
9213}
9214
9215
9216#ifdef IEM_WITH_SETJMP
9217
9218IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9219{
9220 Assert(cbMem >= 1);
9221 Assert(iSegReg < X86_SREG_COUNT);
9222
9223 /*
9224 * 64-bit mode is simpler.
9225 */
9226 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9227 {
9228 if (iSegReg >= X86_SREG_FS)
9229 {
9230 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9231 GCPtrMem += pSel->u64Base;
9232 }
9233
9234 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9235 return GCPtrMem;
9236 }
9237 /*
9238 * 16-bit and 32-bit segmentation.
9239 */
9240 else
9241 {
9242 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9243 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9244 == X86DESCATTR_P /* data, expand up */
9245 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9246 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9247 {
9248 /* expand up */
9249 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9250 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9251 && GCPtrLast32 > (uint32_t)GCPtrMem))
9252 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9253 }
9254 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9255 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9256 {
9257 /* expand down */
9258 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9259 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9260 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9261 && GCPtrLast32 > (uint32_t)GCPtrMem))
9262 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9263 }
9264 else
9265 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9266 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9267 }
9268 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9269}
9270
9271
9272IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9273{
9274 Assert(cbMem >= 1);
9275 Assert(iSegReg < X86_SREG_COUNT);
9276
9277 /*
9278 * 64-bit mode is simpler.
9279 */
9280 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9281 {
9282 if (iSegReg >= X86_SREG_FS)
9283 {
9284 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9285 GCPtrMem += pSel->u64Base;
9286 }
9287
9288 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9289 return GCPtrMem;
9290 }
9291 /*
9292 * 16-bit and 32-bit segmentation.
9293 */
9294 else
9295 {
9296 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9297 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9298 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9299 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9300 {
9301 /* expand up */
9302 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9303 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9304 && GCPtrLast32 > (uint32_t)GCPtrMem))
9305 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9306 }
 9307 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9308 {
9309 /* expand down */
9310 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9311 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9312 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9313 && GCPtrLast32 > (uint32_t)GCPtrMem))
9314 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9315 }
9316 else
9317 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9318 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9319 }
9320 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9321}
9322
9323
9324/**
9325 * Fetches a data dword, longjmp on error, fallback/safe version.
9326 *
9327 * @returns The dword
9328 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9329 * @param iSegReg The index of the segment register to use for
9330 * this access. The base and limits are checked.
9331 * @param GCPtrMem The address of the guest memory.
9332 */
9333IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9334{
9335 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9336 uint32_t const u32Ret = *pu32Src;
9337 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9338 return u32Ret;
9339}
9340
9341
9342/**
9343 * Fetches a data dword, longjmp on error.
9344 *
9345 * @returns The dword
9346 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9347 * @param iSegReg The index of the segment register to use for
9348 * this access. The base and limits are checked.
9349 * @param GCPtrMem The address of the guest memory.
9350 */
9351DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9352{
9353# ifdef IEM_WITH_DATA_TLB
9354 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9355 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9356 {
9357 /// @todo more later.
9358 }
9359
9360 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9361# else
9362 /* The lazy approach. */
9363 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9364 uint32_t const u32Ret = *pu32Src;
9365 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9366 return u32Ret;
9367# endif
9368}
9369#endif
9370
9371
9372#ifdef SOME_UNUSED_FUNCTION
9373/**
9374 * Fetches a data dword and sign extends it to a qword.
9375 *
9376 * @returns Strict VBox status code.
9377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9378 * @param pu64Dst Where to return the sign extended value.
9379 * @param iSegReg The index of the segment register to use for
9380 * this access. The base and limits are checked.
9381 * @param GCPtrMem The address of the guest memory.
9382 */
9383IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9384{
9385 /* The lazy approach for now... */
9386 int32_t const *pi32Src;
9387 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9388 if (rc == VINF_SUCCESS)
9389 {
9390 *pu64Dst = *pi32Src;
9391 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9392 }
9393#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9394 else
9395 *pu64Dst = 0;
9396#endif
9397 return rc;
9398}
9399#endif
9400
9401
9402/**
9403 * Fetches a data qword.
9404 *
9405 * @returns Strict VBox status code.
9406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9407 * @param pu64Dst Where to return the qword.
9408 * @param iSegReg The index of the segment register to use for
9409 * this access. The base and limits are checked.
9410 * @param GCPtrMem The address of the guest memory.
9411 */
9412IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9413{
9414 /* The lazy approach for now... */
9415 uint64_t const *pu64Src;
9416 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9417 if (rc == VINF_SUCCESS)
9418 {
9419 *pu64Dst = *pu64Src;
9420 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9421 }
9422 return rc;
9423}
9424
9425
9426#ifdef IEM_WITH_SETJMP
9427/**
9428 * Fetches a data qword, longjmp on error.
9429 *
9430 * @returns The qword.
9431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9432 * @param iSegReg The index of the segment register to use for
9433 * this access. The base and limits are checked.
9434 * @param GCPtrMem The address of the guest memory.
9435 */
9436DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9437{
9438 /* The lazy approach for now... */
9439 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9440 uint64_t const u64Ret = *pu64Src;
9441 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9442 return u64Ret;
9443}
9444#endif
9445
9446
9447/**
9448 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9449 *
9450 * @returns Strict VBox status code.
9451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9452 * @param pu64Dst Where to return the qword.
9453 * @param iSegReg The index of the segment register to use for
9454 * this access. The base and limits are checked.
9455 * @param GCPtrMem The address of the guest memory.
9456 */
9457IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9458{
9459 /* The lazy approach for now... */
9460 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9461 if (RT_UNLIKELY(GCPtrMem & 15))
9462 return iemRaiseGeneralProtectionFault0(pVCpu);
9463
9464 uint64_t const *pu64Src;
9465 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9466 if (rc == VINF_SUCCESS)
9467 {
9468 *pu64Dst = *pu64Src;
9469 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9470 }
9471 return rc;
9472}
9473
9474
9475#ifdef IEM_WITH_SETJMP
9476/**
9477 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9478 *
9479 * @returns The qword.
9480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9481 * @param iSegReg The index of the segment register to use for
9482 * this access. The base and limits are checked.
9483 * @param GCPtrMem The address of the guest memory.
9484 */
9485DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9486{
9487 /* The lazy approach for now... */
9488 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9489 if (RT_LIKELY(!(GCPtrMem & 15)))
9490 {
9491 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9492 uint64_t const u64Ret = *pu64Src;
9493 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9494 return u64Ret;
9495 }
9496
9497 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9498 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9499}
9500#endif
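
/*
 * Added illustration (not part of the original source): a minimal sketch of how a
 * caller might use the 16-byte aligned qword fetch above.  The helper name
 * iemExampleFetchAlignedQword is made up; only iemMemFetchDataU64AlignedU128 and
 * X86_SREG_DS come from the surrounding code base.
 */
#if 0 /* example only, never built */
static VBOXSTRICTRC iemExampleFetchAlignedQword(PVMCPU pVCpu, uint64_t *pu64Dst, RTGCPTR GCPtrMem)
{
    /* A misaligned GCPtrMem yields #GP(0) through the strict status code before any mapping is done. */
    return iemMemFetchDataU64AlignedU128(pVCpu, pu64Dst, X86_SREG_DS, GCPtrMem);
}
#endif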
9501
9502
9503/**
9504 * Fetches a data tword.
9505 *
9506 * @returns Strict VBox status code.
9507 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9508 * @param pr80Dst Where to return the tword.
9509 * @param iSegReg The index of the segment register to use for
9510 * this access. The base and limits are checked.
9511 * @param GCPtrMem The address of the guest memory.
9512 */
9513IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9514{
9515 /* The lazy approach for now... */
9516 PCRTFLOAT80U pr80Src;
9517 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9518 if (rc == VINF_SUCCESS)
9519 {
9520 *pr80Dst = *pr80Src;
9521 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9522 }
9523 return rc;
9524}
9525
9526
9527#ifdef IEM_WITH_SETJMP
9528/**
9529 * Fetches a data tword, longjmp on error.
9530 *
9531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9532 * @param pr80Dst Where to return the tword.
9533 * @param iSegReg The index of the segment register to use for
9534 * this access. The base and limits are checked.
9535 * @param GCPtrMem The address of the guest memory.
9536 */
9537DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9538{
9539 /* The lazy approach for now... */
9540 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9541 *pr80Dst = *pr80Src;
9542 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9543}
9544#endif
9545
9546
9547/**
9548 * Fetches a data dqword (double qword), generally SSE related.
9549 *
9550 * @returns Strict VBox status code.
9551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9552 * @param pu128Dst Where to return the dqword.
9553 * @param iSegReg The index of the segment register to use for
9554 * this access. The base and limits are checked.
9555 * @param GCPtrMem The address of the guest memory.
9556 */
9557IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9558{
9559 /* The lazy approach for now... */
9560 PCRTUINT128U pu128Src;
9561 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9562 if (rc == VINF_SUCCESS)
9563 {
9564 pu128Dst->au64[0] = pu128Src->au64[0];
9565 pu128Dst->au64[1] = pu128Src->au64[1];
9566 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9567 }
9568 return rc;
9569}
9570
9571
9572#ifdef IEM_WITH_SETJMP
9573/**
9574 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9575 *
9576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9577 * @param pu128Dst Where to return the dqword.
9578 * @param iSegReg The index of the segment register to use for
9579 * this access. The base and limits are checked.
9580 * @param GCPtrMem The address of the guest memory.
9581 */
9582IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9583{
9584 /* The lazy approach for now... */
9585 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9586 pu128Dst->au64[0] = pu128Src->au64[0];
9587 pu128Dst->au64[1] = pu128Src->au64[1];
9588 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9589}
9590#endif
9591
9592
9593/**
9594 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9595 * related.
9596 *
9597 * Raises \#GP(0) if not aligned.
9598 *
9599 * @returns Strict VBox status code.
9600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9601 * @param pu128Dst Where to return the dqword.
9602 * @param iSegReg The index of the segment register to use for
9603 * this access. The base and limits are checked.
9604 * @param GCPtrMem The address of the guest memory.
9605 */
9606IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9607{
9608 /* The lazy approach for now... */
9609 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9610 if ( (GCPtrMem & 15)
9611 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9612 return iemRaiseGeneralProtectionFault0(pVCpu);
9613
9614 PCRTUINT128U pu128Src;
9615 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9616 if (rc == VINF_SUCCESS)
9617 {
9618 pu128Dst->au64[0] = pu128Src->au64[0];
9619 pu128Dst->au64[1] = pu128Src->au64[1];
9620 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9621 }
9622 return rc;
9623}
9624
9625
9626#ifdef IEM_WITH_SETJMP
9627/**
9628 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9629 * related, longjmp on error.
9630 *
9631 * Raises \#GP(0) if not aligned.
9632 *
9633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9634 * @param pu128Dst Where to return the dqword.
9635 * @param iSegReg The index of the segment register to use for
9636 * this access. The base and limits are checked.
9637 * @param GCPtrMem The address of the guest memory.
9638 */
9639DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9640{
9641 /* The lazy approach for now... */
9642 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9643 if ( (GCPtrMem & 15) == 0
9644 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9645 {
9646 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9647 pu128Dst->au64[0] = pu128Src->au64[0];
9648 pu128Dst->au64[1] = pu128Src->au64[1];
9649 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9650 return;
9651 }
9652
9653 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9654 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9655}
9656#endif
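
/*
 * Added illustration (not original): the alignment rule shared by the SSE dqword
 * fetch/store helpers above, restated as a stand-alone predicate.  The name
 * iemExampleSseAlignmentFaults is hypothetical.
 */
#if 0 /* example only, never built */
static bool iemExampleSseAlignmentFaults(PVMCPU pVCpu, RTGCPTR GCPtrMem)
{
    /* Fault (#GP(0)) only when the address is not 16-byte aligned AND MXCSR.MM (misaligned SSE mode) is clear. */
    return (GCPtrMem & 15)
        && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM);
}
#endif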
9657
9658
9659
9660/**
9661 * Fetches a descriptor register (lgdt, lidt).
9662 *
9663 * @returns Strict VBox status code.
9664 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9665 * @param pcbLimit Where to return the limit.
9666 * @param pGCPtrBase Where to return the base.
9667 * @param iSegReg The index of the segment register to use for
9668 * this access. The base and limits are checked.
9669 * @param GCPtrMem The address of the guest memory.
9670 * @param enmOpSize The effective operand size.
9671 */
9672IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9673 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9674{
9675 /*
9676 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9677 * little special:
9678 * - The two reads are done separately.
9679 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9680 * - We suspect the 386 actually commits the limit before the base in
9681 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9682 * don't try to emulate this eccentric behavior, because it's not well
9683 * enough understood and rather hard to trigger.
9684 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9685 */
9686 VBOXSTRICTRC rcStrict;
9687 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9688 {
9689 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9690 if (rcStrict == VINF_SUCCESS)
9691 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9692 }
9693 else
9694 {
9695 uint32_t uTmp = 0; /* (silence Visual C++ 'maybe used uninitialized' warning) */
9696 if (enmOpSize == IEMMODE_32BIT)
9697 {
9698 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9699 {
9700 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9701 if (rcStrict == VINF_SUCCESS)
9702 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9703 }
9704 else
9705 {
9706 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9707 if (rcStrict == VINF_SUCCESS)
9708 {
9709 *pcbLimit = (uint16_t)uTmp;
9710 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9711 }
9712 }
9713 if (rcStrict == VINF_SUCCESS)
9714 *pGCPtrBase = uTmp;
9715 }
9716 else
9717 {
9718 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9719 if (rcStrict == VINF_SUCCESS)
9720 {
9721 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9722 if (rcStrict == VINF_SUCCESS)
9723 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9724 }
9725 }
9726 }
9727 return rcStrict;
9728}
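
/*
 * Added illustration (not original): for the 16-bit operand size path above only
 * bits 0..23 of the fetched base dword are architecturally defined, which is what
 * the UINT32_C(0x00ffffff) mask expresses.  The helper name is hypothetical.
 */
#if 0 /* example only, never built */
static RTGCPTR iemExampleXdtrBaseFrom16BitOp(uint32_t uRawBaseDword)
{
    return uRawBaseDword & UINT32_C(0x00ffffff); /* truncate the base to 24 bits */
}
#endif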
9729
9730
9731
9732/**
9733 * Stores a data byte.
9734 *
9735 * @returns Strict VBox status code.
9736 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9737 * @param iSegReg The index of the segment register to use for
9738 * this access. The base and limits are checked.
9739 * @param GCPtrMem The address of the guest memory.
9740 * @param u8Value The value to store.
9741 */
9742IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9743{
9744 /* The lazy approach for now... */
9745 uint8_t *pu8Dst;
9746 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9747 if (rc == VINF_SUCCESS)
9748 {
9749 *pu8Dst = u8Value;
9750 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9751 }
9752 return rc;
9753}
9754
9755
9756#ifdef IEM_WITH_SETJMP
9757/**
9758 * Stores a data byte, longjmp on error.
9759 *
9760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9761 * @param iSegReg The index of the segment register to use for
9762 * this access. The base and limits are checked.
9763 * @param GCPtrMem The address of the guest memory.
9764 * @param u8Value The value to store.
9765 */
9766IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9767{
9768 /* The lazy approach for now... */
9769 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9770 *pu8Dst = u8Value;
9771 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9772}
9773#endif
9774
9775
9776/**
9777 * Stores a data word.
9778 *
9779 * @returns Strict VBox status code.
9780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9781 * @param iSegReg The index of the segment register to use for
9782 * this access. The base and limits are checked.
9783 * @param GCPtrMem The address of the guest memory.
9784 * @param u16Value The value to store.
9785 */
9786IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9787{
9788 /* The lazy approach for now... */
9789 uint16_t *pu16Dst;
9790 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9791 if (rc == VINF_SUCCESS)
9792 {
9793 *pu16Dst = u16Value;
9794 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9795 }
9796 return rc;
9797}
9798
9799
9800#ifdef IEM_WITH_SETJMP
9801/**
9802 * Stores a data word, longjmp on error.
9803 *
9804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9805 * @param iSegReg The index of the segment register to use for
9806 * this access. The base and limits are checked.
9807 * @param GCPtrMem The address of the guest memory.
9808 * @param u16Value The value to store.
9809 */
9810IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9811{
9812 /* The lazy approach for now... */
9813 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9814 *pu16Dst = u16Value;
9815 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9816}
9817#endif
9818
9819
9820/**
9821 * Stores a data dword.
9822 *
9823 * @returns Strict VBox status code.
9824 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9825 * @param iSegReg The index of the segment register to use for
9826 * this access. The base and limits are checked.
9827 * @param GCPtrMem The address of the guest memory.
9828 * @param u32Value The value to store.
9829 */
9830IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9831{
9832 /* The lazy approach for now... */
9833 uint32_t *pu32Dst;
9834 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9835 if (rc == VINF_SUCCESS)
9836 {
9837 *pu32Dst = u32Value;
9838 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9839 }
9840 return rc;
9841}
9842
9843
9844#ifdef IEM_WITH_SETJMP
9845/**
9846 * Stores a data dword, longjmp on error.
9847 *
9849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9850 * @param iSegReg The index of the segment register to use for
9851 * this access. The base and limits are checked.
9852 * @param GCPtrMem The address of the guest memory.
9853 * @param u32Value The value to store.
9854 */
9855IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9856{
9857 /* The lazy approach for now... */
9858 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9859 *pu32Dst = u32Value;
9860 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9861}
9862#endif
9863
9864
9865/**
9866 * Stores a data qword.
9867 *
9868 * @returns Strict VBox status code.
9869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9870 * @param iSegReg The index of the segment register to use for
9871 * this access. The base and limits are checked.
9872 * @param GCPtrMem The address of the guest memory.
9873 * @param u64Value The value to store.
9874 */
9875IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9876{
9877 /* The lazy approach for now... */
9878 uint64_t *pu64Dst;
9879 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9880 if (rc == VINF_SUCCESS)
9881 {
9882 *pu64Dst = u64Value;
9883 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9884 }
9885 return rc;
9886}
9887
9888
9889#ifdef IEM_WITH_SETJMP
9890/**
9891 * Stores a data qword, longjmp on error.
9892 *
9893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9894 * @param iSegReg The index of the segment register to use for
9895 * this access. The base and limits are checked.
9896 * @param GCPtrMem The address of the guest memory.
9897 * @param u64Value The value to store.
9898 */
9899IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9900{
9901 /* The lazy approach for now... */
9902 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9903 *pu64Dst = u64Value;
9904 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9905}
9906#endif
9907
9908
9909/**
9910 * Stores a data dqword.
9911 *
9912 * @returns Strict VBox status code.
9913 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9914 * @param iSegReg The index of the segment register to use for
9915 * this access. The base and limits are checked.
9916 * @param GCPtrMem The address of the guest memory.
9917 * @param u128Value The value to store.
9918 */
9919IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9920{
9921 /* The lazy approach for now... */
9922 PRTUINT128U pu128Dst;
9923 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9924 if (rc == VINF_SUCCESS)
9925 {
9926 pu128Dst->au64[0] = u128Value.au64[0];
9927 pu128Dst->au64[1] = u128Value.au64[1];
9928 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9929 }
9930 return rc;
9931}
9932
9933
9934#ifdef IEM_WITH_SETJMP
9935/**
9936 * Stores a data dqword, longjmp on error.
9937 *
9938 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9939 * @param iSegReg The index of the segment register to use for
9940 * this access. The base and limits are checked.
9941 * @param GCPtrMem The address of the guest memory.
9942 * @param u128Value The value to store.
9943 */
9944IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9945{
9946 /* The lazy approach for now... */
9947 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9948 pu128Dst->au64[0] = u128Value.au64[0];
9949 pu128Dst->au64[1] = u128Value.au64[1];
9950 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9951}
9952#endif
9953
9954
9955/**
9956 * Stores a data dqword, SSE aligned.
9957 *
9958 * @returns Strict VBox status code.
9959 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9960 * @param iSegReg The index of the segment register to use for
9961 * this access. The base and limits are checked.
9962 * @param GCPtrMem The address of the guest memory.
9963 * @param u128Value The value to store.
9964 */
9965IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9966{
9967 /* The lazy approach for now... */
9968 if ( (GCPtrMem & 15)
9969 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9970 return iemRaiseGeneralProtectionFault0(pVCpu);
9971
9972 PRTUINT128U pu128Dst;
9973 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9974 if (rc == VINF_SUCCESS)
9975 {
9976 pu128Dst->au64[0] = u128Value.au64[0];
9977 pu128Dst->au64[1] = u128Value.au64[1];
9978 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9979 }
9980 return rc;
9981}
9982
9983
9984#ifdef IEM_WITH_SETJMP
9985/**
9986 * Stores a data dqword, SSE aligned, longjmp on error.
9987 *
9989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9990 * @param iSegReg The index of the segment register to use for
9991 * this access. The base and limits are checked.
9992 * @param GCPtrMem The address of the guest memory.
9993 * @param u128Value The value to store.
9994 */
9995DECL_NO_INLINE(IEM_STATIC, void)
9996iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9997{
9998 /* The lazy approach for now... */
9999 if ( (GCPtrMem & 15) == 0
10000 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10001 {
10002 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10003 pu128Dst->au64[0] = u128Value.au64[0];
10004 pu128Dst->au64[1] = u128Value.au64[1];
10005 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10006 return;
10007 }
10008
10009 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10010 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10011}
10012#endif
10013
10014
10015/**
10016 * Stores a descriptor register (sgdt, sidt).
10017 *
10018 * @returns Strict VBox status code.
10019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10020 * @param cbLimit The limit.
10021 * @param GCPtrBase The base address.
10022 * @param iSegReg The index of the segment register to use for
10023 * this access. The base and limits are checked.
10024 * @param GCPtrMem The address of the guest memory.
10025 */
10026IEM_STATIC VBOXSTRICTRC
10027iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10028{
10029 VBOXSTRICTRC rcStrict;
10030 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_IDTR_READS))
10031 {
10032 Log(("sidt/sgdt: Guest intercept -> #VMEXIT\n"));
10033 IEM_RETURN_SVM_NST_GST_VMEXIT(pVCpu, SVM_EXIT_IDTR_READ, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
10034 }
10035
10036 /*
10037 * The SIDT and SGDT instructions actually store the data using two
10038 * independent writes. The instructions do not respond to operand size prefixes.
10039 */
10040 rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10041 if (rcStrict == VINF_SUCCESS)
10042 {
10043 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10044 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10045 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10046 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10047 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10048 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10049 else
10050 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10051 }
10052 return rcStrict;
10053}
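
/*
 * Added illustration (not original): the value the 286-class path above ends up
 * storing as the base part of the SGDT/SIDT image -- the unused high byte is
 * forced to 0xFF.  The helper name is made up.
 */
#if 0 /* example only, never built */
static uint32_t iemExampleXdtrBaseAsStoredBy286(RTGCPTR GCPtrBase)
{
    return (uint32_t)GCPtrBase | UINT32_C(0xff000000); /* low 24 bits of the base, high byte all ones */
}
#endif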
10054
10055
10056/**
10057 * Pushes a word onto the stack.
10058 *
10059 * @returns Strict VBox status code.
10060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10061 * @param u16Value The value to push.
10062 */
10063IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10064{
10065 /* Decrement the stack pointer. */
10066 uint64_t uNewRsp;
10067 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10068 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10069
10070 /* Write the word the lazy way. */
10071 uint16_t *pu16Dst;
10072 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10073 if (rc == VINF_SUCCESS)
10074 {
10075 *pu16Dst = u16Value;
10076 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10077 }
10078
10079 /* Commit the new RSP value unless an access handler made trouble. */
10080 if (rc == VINF_SUCCESS)
10081 pCtx->rsp = uNewRsp;
10082
10083 return rc;
10084}
10085
10086
10087/**
10088 * Pushes a dword onto the stack.
10089 *
10090 * @returns Strict VBox status code.
10091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10092 * @param u32Value The value to push.
10093 */
10094IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10095{
10096 /* Decrement the stack pointer. */
10097 uint64_t uNewRsp;
10098 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10099 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10100
10101 /* Write the dword the lazy way. */
10102 uint32_t *pu32Dst;
10103 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10104 if (rc == VINF_SUCCESS)
10105 {
10106 *pu32Dst = u32Value;
10107 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10108 }
10109
10110 /* Commit the new RSP value unless an access handler made trouble. */
10111 if (rc == VINF_SUCCESS)
10112 pCtx->rsp = uNewRsp;
10113
10114 return rc;
10115}
10116
10117
10118/**
10119 * Pushes a dword segment register value onto the stack.
10120 *
10121 * @returns Strict VBox status code.
10122 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10123 * @param u32Value The value to push.
10124 */
10125IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10126{
10127 /* Decrement the stack pointer. */
10128 uint64_t uNewRsp;
10129 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10130 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10131
10132 VBOXSTRICTRC rc;
10133 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))
10134 {
10135 /* The recompiler writes a full dword. */
10136 uint32_t *pu32Dst;
10137 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10138 if (rc == VINF_SUCCESS)
10139 {
10140 *pu32Dst = u32Value;
10141 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10142 }
10143 }
10144 else
10145 {
10146 /* The Intel docs talk about zero extending the selector register
10147 value. The actual Intel CPU used for testing might be zero extending the
10148 value, but it still only writes the lower word... */
10149 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10150 * happens when crossing a page boundary: is the high word checked
10151 * for write accessibility or not? Probably it is. What about segment limits?
10152 * It appears this behavior is also shared with trap error codes.
10153 *
10154 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.
10155 * Check ancient hardware to find out when it actually did change. */
10156 uint16_t *pu16Dst;
10157 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10158 if (rc == VINF_SUCCESS)
10159 {
10160 *pu16Dst = (uint16_t)u32Value;
10161 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10162 }
10163 }
10164
10165 /* Commit the new RSP value unless an access handler made trouble. */
10166 if (rc == VINF_SUCCESS)
10167 pCtx->rsp = uNewRsp;
10168
10169 return rc;
10170}
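
/*
 * Added illustration (not original): the non-recompiler path above restated as a
 * stand-alone helper -- the dword stack slot is mapped read/write, but only its
 * low word is replaced.  The helper name iemExamplePushSRegLowWordOnly is made up.
 */
#if 0 /* example only, never built */
static VBOXSTRICTRC iemExamplePushSRegLowWordOnly(PVMCPU pVCpu, RTGCPTR GCPtrTop, uint32_t u32Value)
{
    uint16_t *pu16Dst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
    if (rc == VINF_SUCCESS)
    {
        *pu16Dst = (uint16_t)u32Value;  /* the high word of the slot is left as it was */
        rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
    }
    return rc;
}
#endif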
10171
10172
10173/**
10174 * Pushes a qword onto the stack.
10175 *
10176 * @returns Strict VBox status code.
10177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10178 * @param u64Value The value to push.
10179 */
10180IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10181{
10182 /* Decrement the stack pointer. */
10183 uint64_t uNewRsp;
10184 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10185 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10186
10187 /* Write the qword the lazy way. */
10188 uint64_t *pu64Dst;
10189 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10190 if (rc == VINF_SUCCESS)
10191 {
10192 *pu64Dst = u64Value;
10193 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10194 }
10195
10196 /* Commit the new RSP value unless an access handler made trouble. */
10197 if (rc == VINF_SUCCESS)
10198 pCtx->rsp = uNewRsp;
10199
10200 return rc;
10201}
10202
10203
10204/**
10205 * Pops a word from the stack.
10206 *
10207 * @returns Strict VBox status code.
10208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10209 * @param pu16Value Where to store the popped value.
10210 */
10211IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10212{
10213 /* Increment the stack pointer. */
10214 uint64_t uNewRsp;
10215 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10216 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10217
10218 /* Read the word the lazy way. */
10219 uint16_t const *pu16Src;
10220 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10221 if (rc == VINF_SUCCESS)
10222 {
10223 *pu16Value = *pu16Src;
10224 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10225
10226 /* Commit the new RSP value. */
10227 if (rc == VINF_SUCCESS)
10228 pCtx->rsp = uNewRsp;
10229 }
10230
10231 return rc;
10232}
10233
10234
10235/**
10236 * Pops a dword from the stack.
10237 *
10238 * @returns Strict VBox status code.
10239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10240 * @param pu32Value Where to store the popped value.
10241 */
10242IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10243{
10244 /* Increment the stack pointer. */
10245 uint64_t uNewRsp;
10246 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10247 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10248
10249 /* Read the dword the lazy way. */
10250 uint32_t const *pu32Src;
10251 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10252 if (rc == VINF_SUCCESS)
10253 {
10254 *pu32Value = *pu32Src;
10255 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10256
10257 /* Commit the new RSP value. */
10258 if (rc == VINF_SUCCESS)
10259 pCtx->rsp = uNewRsp;
10260 }
10261
10262 return rc;
10263}
10264
10265
10266/**
10267 * Pops a qword from the stack.
10268 *
10269 * @returns Strict VBox status code.
10270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10271 * @param pu64Value Where to store the popped value.
10272 */
10273IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10274{
10275 /* Increment the stack pointer. */
10276 uint64_t uNewRsp;
10277 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10278 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10279
10280 /* Read the qword the lazy way. */
10281 uint64_t const *pu64Src;
10282 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10283 if (rc == VINF_SUCCESS)
10284 {
10285 *pu64Value = *pu64Src;
10286 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10287
10288 /* Commit the new RSP value. */
10289 if (rc == VINF_SUCCESS)
10290 pCtx->rsp = uNewRsp;
10291 }
10292
10293 return rc;
10294}
10295
10296
10297/**
10298 * Pushes a word onto the stack, using a temporary stack pointer.
10299 *
10300 * @returns Strict VBox status code.
10301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10302 * @param u16Value The value to push.
10303 * @param pTmpRsp Pointer to the temporary stack pointer.
10304 */
10305IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10306{
10307 /* Decrement the stack pointer. */
10308 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10309 RTUINT64U NewRsp = *pTmpRsp;
10310 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10311
10312 /* Write the word the lazy way. */
10313 uint16_t *pu16Dst;
10314 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10315 if (rc == VINF_SUCCESS)
10316 {
10317 *pu16Dst = u16Value;
10318 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10319 }
10320
10321 /* Commit the new RSP value unless an access handler made trouble. */
10322 if (rc == VINF_SUCCESS)
10323 *pTmpRsp = NewRsp;
10324
10325 return rc;
10326}
10327
10328
10329/**
10330 * Pushes a dword onto the stack, using a temporary stack pointer.
10331 *
10332 * @returns Strict VBox status code.
10333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10334 * @param u32Value The value to push.
10335 * @param pTmpRsp Pointer to the temporary stack pointer.
10336 */
10337IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10338{
10339 /* Decrement the stack pointer. */
10340 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10341 RTUINT64U NewRsp = *pTmpRsp;
10342 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10343
10344 /* Write the dword the lazy way. */
10345 uint32_t *pu32Dst;
10346 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10347 if (rc == VINF_SUCCESS)
10348 {
10349 *pu32Dst = u32Value;
10350 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10351 }
10352
10353 /* Commit the new RSP value unless an access handler made trouble. */
10354 if (rc == VINF_SUCCESS)
10355 *pTmpRsp = NewRsp;
10356
10357 return rc;
10358}
10359
10360
10361/**
10362 * Pushes a qword onto the stack, using a temporary stack pointer.
10363 *
10364 * @returns Strict VBox status code.
10365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10366 * @param u64Value The value to push.
10367 * @param pTmpRsp Pointer to the temporary stack pointer.
10368 */
10369IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10370{
10371 /* Decrement the stack pointer. */
10372 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10373 RTUINT64U NewRsp = *pTmpRsp;
10374 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10375
10376 /* Write the qword the lazy way. */
10377 uint64_t *pu64Dst;
10378 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10379 if (rc == VINF_SUCCESS)
10380 {
10381 *pu64Dst = u64Value;
10382 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10383 }
10384
10385 /* Commit the new RSP value unless an access handler made trouble. */
10386 if (rc == VINF_SUCCESS)
10387 *pTmpRsp = NewRsp;
10388
10389 return rc;
10390}
10391
10392
10393/**
10394 * Pops a word from the stack, using a temporary stack pointer.
10395 *
10396 * @returns Strict VBox status code.
10397 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10398 * @param pu16Value Where to store the popped value.
10399 * @param pTmpRsp Pointer to the temporary stack pointer.
10400 */
10401IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10402{
10403 /* Increment the stack pointer. */
10404 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10405 RTUINT64U NewRsp = *pTmpRsp;
10406 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10407
10408 /* Read the word the lazy way. */
10409 uint16_t const *pu16Src;
10410 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10411 if (rc == VINF_SUCCESS)
10412 {
10413 *pu16Value = *pu16Src;
10414 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10415
10416 /* Commit the new RSP value. */
10417 if (rc == VINF_SUCCESS)
10418 *pTmpRsp = NewRsp;
10419 }
10420
10421 return rc;
10422}
10423
10424
10425/**
10426 * Pops a dword from the stack, using a temporary stack pointer.
10427 *
10428 * @returns Strict VBox status code.
10429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10430 * @param pu32Value Where to store the popped value.
10431 * @param pTmpRsp Pointer to the temporary stack pointer.
10432 */
10433IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10434{
10435 /* Increment the stack pointer. */
10436 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10437 RTUINT64U NewRsp = *pTmpRsp;
10438 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10439
10440 /* Read the dword the lazy way. */
10441 uint32_t const *pu32Src;
10442 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10443 if (rc == VINF_SUCCESS)
10444 {
10445 *pu32Value = *pu32Src;
10446 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10447
10448 /* Commit the new RSP value. */
10449 if (rc == VINF_SUCCESS)
10450 *pTmpRsp = NewRsp;
10451 }
10452
10453 return rc;
10454}
10455
10456
10457/**
10458 * Pops a qword from the stack, using a temporary stack pointer.
10459 *
10460 * @returns Strict VBox status code.
10461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10462 * @param pu64Value Where to store the popped value.
10463 * @param pTmpRsp Pointer to the temporary stack pointer.
10464 */
10465IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10466{
10467 /* Increment the stack pointer. */
10468 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10469 RTUINT64U NewRsp = *pTmpRsp;
10470 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10471
10472 /* Read the qword the lazy way. */
10473 uint64_t const *pu64Src;
10474 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10475 if (rcStrict == VINF_SUCCESS)
10476 {
10477 *pu64Value = *pu64Src;
10478 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10479
10480 /* Commit the new RSP value. */
10481 if (rcStrict == VINF_SUCCESS)
10482 *pTmpRsp = NewRsp;
10483 }
10484
10485 return rcStrict;
10486}
10487
10488
10489/**
10490 * Begin a special stack push (used by interrupt, exceptions and such).
10491 *
10492 * This will raise \#SS or \#PF if appropriate.
10493 *
10494 * @returns Strict VBox status code.
10495 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10496 * @param cbMem The number of bytes to push onto the stack.
10497 * @param ppvMem Where to return the pointer to the stack memory.
10498 * As with the other memory functions this could be
10499 * direct access or bounce buffered access, so
10500 * don't commit the register until the commit call
10501 * succeeds.
10502 * @param puNewRsp Where to return the new RSP value. This must be
10503 * passed unchanged to
10504 * iemMemStackPushCommitSpecial().
10505 */
10506IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10507{
10508 Assert(cbMem < UINT8_MAX);
10509 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10510 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10511 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10512}
10513
10514
10515/**
10516 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10517 *
10518 * This will update the rSP.
10519 *
10520 * @returns Strict VBox status code.
10521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10522 * @param pvMem The pointer returned by
10523 * iemMemStackPushBeginSpecial().
10524 * @param uNewRsp The new RSP value returned by
10525 * iemMemStackPushBeginSpecial().
10526 */
10527IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10528{
10529 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10530 if (rcStrict == VINF_SUCCESS)
10531 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10532 return rcStrict;
10533}
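
/*
 * Added illustration (not original): typical pairing of the special stack push
 * helpers above.  The helper name and the fixed 8-byte frame are invented for the
 * sketch; only the iemMemStackPush*Special calls come from this file.
 */
#if 0 /* example only, never built */
static VBOXSTRICTRC iemExamplePushEightByteFrame(PVMCPU pVCpu, uint64_t u64Frame)
{
    void     *pvFrame;
    uint64_t  uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *(uint64_t *)pvFrame = u64Frame;                               /* fill the (possibly bounce buffered) frame */
    return iemMemStackPushCommitSpecial(pVCpu, pvFrame, uNewRsp);  /* commits the memory and then RSP */
}
#endif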
10534
10535
10536/**
10537 * Begin a special stack pop (used by iret, retf and such).
10538 *
10539 * This will raise \#SS or \#PF if appropriate.
10540 *
10541 * @returns Strict VBox status code.
10542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10543 * @param cbMem The number of bytes to pop from the stack.
10544 * @param ppvMem Where to return the pointer to the stack memory.
10545 * @param puNewRsp Where to return the new RSP value. This must be
10546 * assigned to CPUMCTX::rsp manually some time
10547 * after iemMemStackPopDoneSpecial() has been
10548 * called.
10549 */
10550IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10551{
10552 Assert(cbMem < UINT8_MAX);
10553 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10554 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10555 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10556}
10557
10558
10559/**
10560 * Continue a special stack pop (used by iret and retf).
10561 *
10562 * This will raise \#SS or \#PF if appropriate.
10563 *
10564 * @returns Strict VBox status code.
10565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10566 * @param cbMem The number of bytes to pop from the stack.
10567 * @param ppvMem Where to return the pointer to the stack memory.
10568 * @param puNewRsp Where to return the new RSP value. This must be
10569 * assigned to CPUMCTX::rsp manually some time
10570 * after iemMemStackPopDoneSpecial() has been
10571 * called.
10572 */
10573IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10574{
10575 Assert(cbMem < UINT8_MAX);
10576 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10577 RTUINT64U NewRsp;
10578 NewRsp.u = *puNewRsp;
10579 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10580 *puNewRsp = NewRsp.u;
10581 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10582}
10583
10584
10585/**
10586 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10587 * iemMemStackPopContinueSpecial).
10588 *
10589 * The caller will manually commit the rSP.
10590 *
10591 * @returns Strict VBox status code.
10592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10593 * @param pvMem The pointer returned by
10594 * iemMemStackPopBeginSpecial() or
10595 * iemMemStackPopContinueSpecial().
10596 */
10597IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10598{
10599 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10600}
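
/*
 * Added illustration (not original): the matching pop sequence -- begin, copy out,
 * done, and only then commit RSP manually as the function comments above require.
 * The helper name and frame size are invented.
 */
#if 0 /* example only, never built */
static VBOXSTRICTRC iemExamplePopEightByteFrame(PVMCPU pVCpu, uint64_t *pu64Frame)
{
    void const *pvFrame;
    uint64_t    uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, &pvFrame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    *pu64Frame = *(uint64_t const *)pvFrame;
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
    if (rcStrict == VINF_SUCCESS)
        IEM_GET_CTX(pVCpu)->rsp = uNewRsp;                         /* the caller commits RSP itself */
    return rcStrict;
}
#endif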
10601
10602
10603/**
10604 * Fetches a system table byte.
10605 *
10606 * @returns Strict VBox status code.
10607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10608 * @param pbDst Where to return the byte.
10609 * @param iSegReg The index of the segment register to use for
10610 * this access. The base and limits are checked.
10611 * @param GCPtrMem The address of the guest memory.
10612 */
10613IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10614{
10615 /* The lazy approach for now... */
10616 uint8_t const *pbSrc;
10617 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10618 if (rc == VINF_SUCCESS)
10619 {
10620 *pbDst = *pbSrc;
10621 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10622 }
10623 return rc;
10624}
10625
10626
10627/**
10628 * Fetches a system table word.
10629 *
10630 * @returns Strict VBox status code.
10631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10632 * @param pu16Dst Where to return the word.
10633 * @param iSegReg The index of the segment register to use for
10634 * this access. The base and limits are checked.
10635 * @param GCPtrMem The address of the guest memory.
10636 */
10637IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10638{
10639 /* The lazy approach for now... */
10640 uint16_t const *pu16Src;
10641 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10642 if (rc == VINF_SUCCESS)
10643 {
10644 *pu16Dst = *pu16Src;
10645 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10646 }
10647 return rc;
10648}
10649
10650
10651/**
10652 * Fetches a system table dword.
10653 *
10654 * @returns Strict VBox status code.
10655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10656 * @param pu32Dst Where to return the dword.
10657 * @param iSegReg The index of the segment register to use for
10658 * this access. The base and limits are checked.
10659 * @param GCPtrMem The address of the guest memory.
10660 */
10661IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10662{
10663 /* The lazy approach for now... */
10664 uint32_t const *pu32Src;
10665 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10666 if (rc == VINF_SUCCESS)
10667 {
10668 *pu32Dst = *pu32Src;
10669 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10670 }
10671 return rc;
10672}
10673
10674
10675/**
10676 * Fetches a system table qword.
10677 *
10678 * @returns Strict VBox status code.
10679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10680 * @param pu64Dst Where to return the qword.
10681 * @param iSegReg The index of the segment register to use for
10682 * this access. The base and limits are checked.
10683 * @param GCPtrMem The address of the guest memory.
10684 */
10685IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10686{
10687 /* The lazy approach for now... */
10688 uint64_t const *pu64Src;
10689 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10690 if (rc == VINF_SUCCESS)
10691 {
10692 *pu64Dst = *pu64Src;
10693 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10694 }
10695 return rc;
10696}
10697
10698
10699/**
10700 * Fetches a descriptor table entry with caller specified error code.
10701 *
10702 * @returns Strict VBox status code.
10703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10704 * @param pDesc Where to return the descriptor table entry.
10705 * @param uSel The selector which table entry to fetch.
10706 * @param uXcpt The exception to raise on table lookup error.
10707 * @param uErrorCode The error code associated with the exception.
10708 */
10709IEM_STATIC VBOXSTRICTRC
10710iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10711{
10712 AssertPtr(pDesc);
10713 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10714
10715 /** @todo did the 286 require all 8 bytes to be accessible? */
10716 /*
10717 * Get the selector table base and check bounds.
10718 */
10719 RTGCPTR GCPtrBase;
10720 if (uSel & X86_SEL_LDT)
10721 {
10722 if ( !pCtx->ldtr.Attr.n.u1Present
10723 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10724 {
10725 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10726 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10727 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10728 uErrorCode, 0);
10729 }
10730
10731 Assert(pCtx->ldtr.Attr.n.u1Present);
10732 GCPtrBase = pCtx->ldtr.u64Base;
10733 }
10734 else
10735 {
10736 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10737 {
10738 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10739 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10740 uErrorCode, 0);
10741 }
10742 GCPtrBase = pCtx->gdtr.pGdt;
10743 }
10744
10745 /*
10746 * Read the legacy descriptor and maybe the long mode extensions if
10747 * required.
10748 */
10749 VBOXSTRICTRC rcStrict;
10750 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10751 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10752 else
10753 {
10754 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10755 if (rcStrict == VINF_SUCCESS)
10756 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10757 if (rcStrict == VINF_SUCCESS)
10758 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10759 if (rcStrict == VINF_SUCCESS)
10760 pDesc->Legacy.au16[3] = 0;
10761 else
10762 return rcStrict;
10763 }
10764
10765 if (rcStrict == VINF_SUCCESS)
10766 {
10767 if ( !IEM_IS_LONG_MODE(pVCpu)
10768 || pDesc->Legacy.Gen.u1DescType)
10769 pDesc->Long.au64[1] = 0;
10770 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10771 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10772 else
10773 {
10774 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10775 /** @todo is this the right exception? */
10776 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10777 }
10778 }
10779 return rcStrict;
10780}
10781
10782
10783/**
10784 * Fetches a descriptor table entry.
10785 *
10786 * @returns Strict VBox status code.
10787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10788 * @param pDesc Where to return the descriptor table entry.
10789 * @param uSel The selector which table entry to fetch.
10790 * @param uXcpt The exception to raise on table lookup error.
10791 */
10792IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10793{
10794 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10795}
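
/*
 * Added illustration (not original): the address computation performed by the
 * descriptor fetch above, spelled out -- table base (LDT or GDT, chosen by the TI
 * bit) plus the selector with RPL and TI stripped.  The helper name is made up.
 */
#if 0 /* example only, never built */
static RTGCPTR iemExampleSelDescAddr(PCPUMCTX pCtx, uint16_t uSel)
{
    RTGCPTR GCPtrBase = (uSel & X86_SEL_LDT) ? pCtx->ldtr.u64Base : pCtx->gdtr.pGdt;
    return GCPtrBase + (uSel & X86_SEL_MASK); /* X86_SEL_MASK clears the RPL and TI bits, leaving index * 8 */
}
#endif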
10796
10797
10798/**
10799 * Fakes a long mode stack selector for SS = 0.
10800 *
10801 * @param pDescSs Where to return the fake stack descriptor.
10802 * @param uDpl The DPL we want.
10803 */
10804IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10805{
10806 pDescSs->Long.au64[0] = 0;
10807 pDescSs->Long.au64[1] = 0;
10808 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10809 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10810 pDescSs->Long.Gen.u2Dpl = uDpl;
10811 pDescSs->Long.Gen.u1Present = 1;
10812 pDescSs->Long.Gen.u1Long = 1;
10813}
10814
10815
10816/**
10817 * Marks the selector descriptor as accessed (only non-system descriptors).
10818 *
10819 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10820 * will therefore skip the limit checks.
10821 *
10822 * @returns Strict VBox status code.
10823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10824 * @param uSel The selector.
10825 */
10826IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10827{
10828 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10829
10830 /*
10831 * Get the selector table base and calculate the entry address.
10832 */
10833 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10834 ? pCtx->ldtr.u64Base
10835 : pCtx->gdtr.pGdt;
10836 GCPtr += uSel & X86_SEL_MASK;
10837
10838 /*
10839 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10840 * ugly stuff to avoid this. This will make sure it's an atomic access
10841 * as well as more or less remove any question about 8-bit or 32-bit accesses.
10842 */
10843 VBOXSTRICTRC rcStrict;
10844 uint32_t volatile *pu32;
10845 if ((GCPtr & 3) == 0)
10846 {
10847 /* The normal case, map the 32-bit bits around the accessed bit (40). */
10848 GCPtr += 2 + 2;
10849 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10850 if (rcStrict != VINF_SUCCESS)
10851 return rcStrict;
10852 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10853 }
10854 else
10855 {
10856 /* The misaligned GDT/LDT case, map the whole thing. */
10857 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10858 if (rcStrict != VINF_SUCCESS)
10859 return rcStrict;
10860 switch ((uintptr_t)pu32 & 3)
10861 {
10862 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10863 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10864 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10865 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10866 }
10867 }
10868
10869 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10870}
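
/*
 * Added illustration (not original): the bit arithmetic behind the aligned path
 * above.  The accessed flag is descriptor bit 40 (bit 0 of the type byte at offset
 * 5); mapping the dword at offset 4 therefore puts it at bit 8.  Names are invented.
 */
#if 0 /* example only, never built */
static void iemExampleAccessedBitArithmetic(void)
{
    unsigned const offTypeByte  = 40 / 8;      /* byte 5 of the 8-byte descriptor */
    unsigned const iBitInDword4 = 40 - 4 * 8;  /* bit 8, i.e. ASMAtomicBitSet(pu32, 8) above */
    NOREF(offTypeByte); NOREF(iBitInDword4);
}
#endif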
10871
10872/** @} */
10873
10874
10875/*
10876 * Include the C/C++ implementation of instruction.
10877 */
10878#include "IEMAllCImpl.cpp.h"
10879
10880
10881
10882/** @name "Microcode" macros.
10883 *
10884 * The idea is that we should be able to use the same code to interpret
10885 * instructions as well as recompiler instructions. Thus this obfuscation.
10886 *
10887 * @{
10888 */
10889#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
10890#define IEM_MC_END() }
10891#define IEM_MC_PAUSE() do {} while (0)
10892#define IEM_MC_CONTINUE() do {} while (0)
10893
10894/** Internal macro. */
10895#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
10896 do \
10897 { \
10898 VBOXSTRICTRC rcStrict2 = a_Expr; \
10899 if (rcStrict2 != VINF_SUCCESS) \
10900 return rcStrict2; \
10901 } while (0)
10902
10903
10904#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
10905#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
10906#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
10907#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
10908#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
10909#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
10910#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
10911#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
10912#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
10913 do { \
10914 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
10915 return iemRaiseDeviceNotAvailable(pVCpu); \
10916 } while (0)
10917#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
10918 do { \
10919 if (((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
10920 return iemRaiseDeviceNotAvailable(pVCpu); \
10921 } while (0)
10922#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
10923 do { \
10924 if ((pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
10925 return iemRaiseMathFault(pVCpu); \
10926 } while (0)
10927#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
10928 do { \
10929 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10930 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10931 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
10932 return iemRaiseUndefinedOpcode(pVCpu); \
10933 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10934 return iemRaiseDeviceNotAvailable(pVCpu); \
10935 } while (0)
10936#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
10937 do { \
10938 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10939 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10940 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
10941 return iemRaiseUndefinedOpcode(pVCpu); \
10942 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10943 return iemRaiseDeviceNotAvailable(pVCpu); \
10944 } while (0)
10945#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
10946 do { \
10947 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
10948 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
10949 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
10950 return iemRaiseUndefinedOpcode(pVCpu); \
10951 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10952 return iemRaiseDeviceNotAvailable(pVCpu); \
10953 } while (0)
10954#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
10955 do { \
10956 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10957 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
10958 return iemRaiseUndefinedOpcode(pVCpu); \
10959 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10960 return iemRaiseDeviceNotAvailable(pVCpu); \
10961 } while (0)
10962#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
10963 do { \
10964 if ( ((pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0 & X86_CR0_EM) \
10965 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
10966 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
10967 return iemRaiseUndefinedOpcode(pVCpu); \
10968 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
10969 return iemRaiseDeviceNotAvailable(pVCpu); \
10970 } while (0)
10971#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
10972 do { \
10973 if (pVCpu->iem.s.uCpl != 0) \
10974 return iemRaiseGeneralProtectionFault0(pVCpu); \
10975 } while (0)
10976#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
10977 do { \
10978 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
10979 else return iemRaiseGeneralProtectionFault0(pVCpu); \
10980 } while (0)
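/*
 * Usage sketch (illustrative only): an instruction with an alignment
 * restricted memory operand would typically pair the effective address
 * calculation with this check; a_cbAlign must be a power of two for the
 * mask test to work.  GCPtrEffSrc and bRm are assumed decoder locals.
 *
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(GCPtrEffSrc, 16);
 */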
10981
10982
10983#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
10984#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
10985#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
10986#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
10987#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
10988#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
10989#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
10990 uint32_t a_Name; \
10991 uint32_t *a_pName = &a_Name
10992#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
10993 do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u = (a_EFlags); Assert((pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u & X86_EFL_1); } while (0)
10994
10995#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
10996#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
10997
10998#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
10999#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11000#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11001#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11002#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11003#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11004#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11005#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11006#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11007#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11008#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11009#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11010#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11011#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11012#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11013#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11014#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11015#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11016#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11017#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg))
11018#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11019#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11020#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->cr0
11021#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11022#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11023#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->ldtr.Sel
11024#define IEM_MC_FETCH_TR_U16(a_u16Dst) (a_u16Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11025#define IEM_MC_FETCH_TR_U32(a_u32Dst) (a_u32Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11026#define IEM_MC_FETCH_TR_U64(a_u64Dst) (a_u64Dst) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->tr.Sel
11027/** @note Not for IOPL or IF testing or modification. */
11028#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11029#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
11030#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11031#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11032
11033#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11034#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11035#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11036#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11037#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11038#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11039#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11040#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11041#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11042#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11043#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11044 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11045
11046#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11047#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11048/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11049 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11050#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11051#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11052/** @note Not for IOPL or IF testing or modification. */
11053#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &(pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u
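/*
 * Sketch of the x86-64 zero extension convention behind the 32-bit variants
 * (illustrative, not lifted from a specific opcode): a 32-bit GPR write
 * clears bits 63:32, which is why IEM_MC_STORE_GREG_U32 goes through the
 * 64-bit reference and why users of IEM_MC_REF_GREG_U32 must finish with
 * IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF.
 *
 *      IEM_MC_STORE_GREG_U32(X86_GREG_xAX, UINT32_C(0xdeadbeef));  // RAX = 0x00000000deadbeef
 *
 *      uint32_t *pu32Dst;
 *      IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
 *      *pu32Dst ^= UINT32_C(1);
 *      IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);                 // keep bits 63:32 zero on commit
 */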
11054
11055#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11056#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11057#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11058 do { \
11059 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11060 *pu32Reg += (a_u32Value); \
11061        pu32Reg[1] = 0; /* implicitly clear the high dword (bits 63:32). */ \
11062 } while (0)
11063#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11064
11065#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11066#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11067#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11068 do { \
11069 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11070 *pu32Reg -= (a_u32Value); \
11071        pu32Reg[1] = 0; /* implicitly clear the high dword (bits 63:32). */ \
11072 } while (0)
11073#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11074#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11075
11076#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11077#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11078#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11079#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11080#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11081#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11082#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11083
11084#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11085#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11086#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11087#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11088
11089#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11090#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11091#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11092
11093#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11094#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11095#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11096
11097#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11098#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11099#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11100
11101#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11102#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11103#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11104
11105#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11106
11107#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11108
11109#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11110#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11111#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11112 do { \
11113 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11114 *pu32Reg &= (a_u32Value); \
11115        pu32Reg[1] = 0; /* implicitly clear the high dword (bits 63:32). */ \
11116 } while (0)
11117#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11118
11119#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11120#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11121#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11122 do { \
11123 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11124 *pu32Reg |= (a_u32Value); \
11125        pu32Reg[1] = 0; /* implicitly clear the high dword (bits 63:32). */ \
11126 } while (0)
11127#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11128
11129
11130/** @note Not for IOPL or IF modification. */
11131#define IEM_MC_SET_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u |= (a_fBit); } while (0)
11132/** @note Not for IOPL or IF modification. */
11133#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u &= ~(a_fBit); } while (0)
11134/** @note Not for IOPL or IF modification. */
11135#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->eflags.u ^= (a_fBit); } while (0)
11136
11137#define IEM_MC_CLEAR_FSW_EX() do { (pVCpu)->iem.s.CTX_SUFF(pCtx)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11138
11139
11140#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11141 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11142#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11143 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11144#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) \
11145 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); } while (0)
11146#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) \
11147 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); } while (0)
11148#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) \
11149 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11150#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11151 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11152#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11153 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11154
11155#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11156 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11157 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11158 } while (0)
11159#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11160 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11161#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11162 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11163#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11164 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11165#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11166 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11167 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11168 } while (0)
11169#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11170 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11171#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11172 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11173 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11174 } while (0)
11175#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11176 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11177#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11178 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11179 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11180 } while (0)
11181#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11182 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11183#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11184 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11185#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11186 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11187#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11188 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11189 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11190 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11191 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11192 } while (0)
11193
11194#ifndef IEM_WITH_SETJMP
11195# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11196 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11197# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11198 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11199# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11200 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11201#else
11202# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11203 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11204# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11205 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11206# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11207 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11208#endif
11209
11210#ifndef IEM_WITH_SETJMP
11211# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11212 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11213# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11214 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11215# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11216 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11217#else
11218# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11219 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11220# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11221 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11222# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11223 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11224#endif
11225
11226#ifndef IEM_WITH_SETJMP
11227# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11228 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11229# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11230 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11231# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11232 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11233#else
11234# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11235 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11236# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11237 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11238# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11239 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11240#endif
11241
11242#ifdef SOME_UNUSED_FUNCTION
11243# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11244 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11245#endif
11246
11247#ifndef IEM_WITH_SETJMP
11248# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11249 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11250# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11251 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11252# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11253 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11254# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11255 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11256#else
11257# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11258 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11259# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11260 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11261# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11262 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11263# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11264 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11265#endif
11266
11267#ifndef IEM_WITH_SETJMP
11268# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11269 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11270# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11271 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11272# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11273 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11274#else
11275# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11276 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11277# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11278 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11279# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11280 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11281#endif
11282
11283#ifndef IEM_WITH_SETJMP
11284# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11285 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11286# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11287 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11288#else
11289# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11290 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11291# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11292 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11293#endif
11294
11295
11296
11297#ifndef IEM_WITH_SETJMP
11298# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11299 do { \
11300 uint8_t u8Tmp; \
11301 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11302 (a_u16Dst) = u8Tmp; \
11303 } while (0)
11304# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11305 do { \
11306 uint8_t u8Tmp; \
11307 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11308 (a_u32Dst) = u8Tmp; \
11309 } while (0)
11310# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11311 do { \
11312 uint8_t u8Tmp; \
11313 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11314 (a_u64Dst) = u8Tmp; \
11315 } while (0)
11316# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11317 do { \
11318 uint16_t u16Tmp; \
11319 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11320 (a_u32Dst) = u16Tmp; \
11321 } while (0)
11322# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11323 do { \
11324 uint16_t u16Tmp; \
11325 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11326 (a_u64Dst) = u16Tmp; \
11327 } while (0)
11328# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11329 do { \
11330 uint32_t u32Tmp; \
11331 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11332 (a_u64Dst) = u32Tmp; \
11333 } while (0)
11334#else /* IEM_WITH_SETJMP */
11335# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11336 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11337# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11338 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11339# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11340 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11341# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11342 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11343# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11344 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11345# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11346 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11347#endif /* IEM_WITH_SETJMP */
11348
11349#ifndef IEM_WITH_SETJMP
11350# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11351 do { \
11352 uint8_t u8Tmp; \
11353 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11354 (a_u16Dst) = (int8_t)u8Tmp; \
11355 } while (0)
11356# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11357 do { \
11358 uint8_t u8Tmp; \
11359 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11360 (a_u32Dst) = (int8_t)u8Tmp; \
11361 } while (0)
11362# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11363 do { \
11364 uint8_t u8Tmp; \
11365 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11366 (a_u64Dst) = (int8_t)u8Tmp; \
11367 } while (0)
11368# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11369 do { \
11370 uint16_t u16Tmp; \
11371 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11372 (a_u32Dst) = (int16_t)u16Tmp; \
11373 } while (0)
11374# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11375 do { \
11376 uint16_t u16Tmp; \
11377 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11378 (a_u64Dst) = (int16_t)u16Tmp; \
11379 } while (0)
11380# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11381 do { \
11382 uint32_t u32Tmp; \
11383 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11384 (a_u64Dst) = (int32_t)u32Tmp; \
11385 } while (0)
11386#else /* IEM_WITH_SETJMP */
11387# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11388 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11389# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11390 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11391# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11392 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11393# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11394 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11395# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11396 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11397# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11398 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11399#endif /* IEM_WITH_SETJMP */
11400
11401#ifndef IEM_WITH_SETJMP
11402# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11403 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11404# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11405 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11406# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11407 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11408# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11409 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11410#else
11411# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11412 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11413# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11414 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11415# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11416 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11417# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11418 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11419#endif
11420
11421#ifndef IEM_WITH_SETJMP
11422# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11423 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11424# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11425 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11426# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11427 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11428# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11429 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11430#else
11431# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11432 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11433# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11434 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11435# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11436 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11437# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11438 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11439#endif
11440
11441#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11442#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11443#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11444#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11445#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11446#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11447#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11448 do { \
11449 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11450 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11451 } while (0)
11452
11453#ifndef IEM_WITH_SETJMP
11454# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11455 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11456# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11457 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11458#else
11459# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11460 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11461# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11462 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11463#endif
11464
11465
11466#define IEM_MC_PUSH_U16(a_u16Value) \
11467 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11468#define IEM_MC_PUSH_U32(a_u32Value) \
11469 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11470#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11471 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11472#define IEM_MC_PUSH_U64(a_u64Value) \
11473 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11474
11475#define IEM_MC_POP_U16(a_pu16Value) \
11476 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11477#define IEM_MC_POP_U32(a_pu32Value) \
11478 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11479#define IEM_MC_POP_U64(a_pu64Value) \
11480 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11481
11482/** Maps guest memory for direct or bounce buffered access.
11483 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11484 * @remarks May return.
11485 */
11486#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11487 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11488
11489/** Maps guest memory for direct or bounce buffered access.
11490 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11491 * @remarks May return.
11492 */
11493#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11494 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11495
11496/** Commits the memory and unmaps the guest memory.
11497 * @remarks May return.
11498 */
11499#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11500 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
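/*
 * Typical map/commit pattern for a read-modify-write memory operand
 * (informal sketch; pu32Dst, pEFlags, GCPtrEffDst and the worker name are
 * placeholders for whatever the instruction in question uses):
 *
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_SomeRmwWorker_u32, pu32Dst, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 */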
11501
11502/** Commits the memory and unmaps the guest memory unless the FPU status word
11503 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11504 * that would prevent the FPU store from taking place.
11505 *
11506 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11507 * store, while \#P will not.
11508 *
11509 * @remarks May in theory return - for now.
11510 */
11511#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11512 do { \
11513 if ( !(a_u16FSW & X86_FSW_ES) \
11514 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11515 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11516 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11517 } while (0)
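/*
 * Worked example of the check above (informative): the FSW exception flags
 * and the FCW mask bits share bit positions 0..5, which is what makes the
 * '& ~FCW' trick work.
 *
 *      // ES plus only a #P pending: the store is committed.
 *      uint16_t const u16FswPrecision = X86_FSW_ES | X86_FSW_PE;
 *      // ES plus an unmasked #IE pending (FCW.IM clear): the commit is skipped.
 *      uint16_t const u16FswInvalidOp = X86_FSW_ES | X86_FSW_IE;
 */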
11518
11519/** Calculate effective address from R/M. */
11520#ifndef IEM_WITH_SETJMP
11521# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11522 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11523#else
11524# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11525 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11526#endif
11527
11528#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11529#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11530#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11531#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11532#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11533#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11534#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11535
11536/**
11537 * Defers the rest of the instruction emulation to a C implementation routine
11538 * and returns, only taking the standard parameters.
11539 *
11540 * @param a_pfnCImpl The pointer to the C routine.
11541 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11542 */
11543#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11544
11545/**
11546 * Defers the rest of instruction emulation to a C implementation routine and
11547 * returns, taking one argument in addition to the standard ones.
11548 *
11549 * @param a_pfnCImpl The pointer to the C routine.
11550 * @param a0 The argument.
11551 */
11552#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11553
11554/**
11555 * Defers the rest of the instruction emulation to a C implementation routine
11556 * and returns, taking two arguments in addition to the standard ones.
11557 *
11558 * @param a_pfnCImpl The pointer to the C routine.
11559 * @param a0 The first extra argument.
11560 * @param a1 The second extra argument.
11561 */
11562#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11563
11564/**
11565 * Defers the rest of the instruction emulation to a C implementation routine
11566 * and returns, taking three arguments in addition to the standard ones.
11567 *
11568 * @param a_pfnCImpl The pointer to the C routine.
11569 * @param a0 The first extra argument.
11570 * @param a1 The second extra argument.
11571 * @param a2 The third extra argument.
11572 */
11573#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11574
11575/**
11576 * Defers the rest of the instruction emulation to a C implementation routine
11577 * and returns, taking four arguments in addition to the standard ones.
11578 *
11579 * @param a_pfnCImpl The pointer to the C routine.
11580 * @param a0 The first extra argument.
11581 * @param a1 The second extra argument.
11582 * @param a2 The third extra argument.
11583 * @param a3 The fourth extra argument.
11584 */
11585#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11586
11587/**
11588 * Defers the rest of the instruction emulation to a C implementation routine
11589 * and returns, taking five arguments in addition to the standard ones.
11590 *
11591 * @param a_pfnCImpl The pointer to the C routine.
11592 * @param a0 The first extra argument.
11593 * @param a1 The second extra argument.
11594 * @param a2 The third extra argument.
11595 * @param a3 The fourth extra argument.
11596 * @param a4 The fifth extra argument.
11597 */
11598#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
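/*
 * Typical IEM_MC_CALL_CIMPL_n use (informal sketch): the decoder gathers the
 * operands as IEM_MC_ARGs and defers the heavy lifting to a C worker, which
 * also receives the instruction length so it can advance RIP itself.  The
 * worker name is a placeholder; bRm is assumed to come from the decoder.
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG_CONST(uint8_t, iEffSeg, pVCpu->iem.s.iEffSeg, 0);
 *      IEM_MC_ARG(RTGCPTR,       GCPtrEff,                       1);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
 *      IEM_MC_CALL_CIMPL_2(iemCImpl_SomeWorker, iEffSeg, GCPtrEff);
 *      IEM_MC_END();
 */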
11599
11600/**
11601 * Defers the entire instruction emulation to a C implementation routine and
11602 * returns, only taking the standard parameters.
11603 *
11604 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11605 *
11606 * @param a_pfnCImpl The pointer to the C routine.
11607 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11608 */
11609#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11610
11611/**
11612 * Defers the entire instruction emulation to a C implementation routine and
11613 * returns, taking one argument in addition to the standard ones.
11614 *
11615 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11616 *
11617 * @param a_pfnCImpl The pointer to the C routine.
11618 * @param a0 The argument.
11619 */
11620#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11621
11622/**
11623 * Defers the entire instruction emulation to a C implementation routine and
11624 * returns, taking two arguments in addition to the standard ones.
11625 *
11626 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11627 *
11628 * @param a_pfnCImpl The pointer to the C routine.
11629 * @param a0 The first extra argument.
11630 * @param a1 The second extra argument.
11631 */
11632#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11633
11634/**
11635 * Defers the entire instruction emulation to a C implementation routine and
11636 * returns, taking three arguments in addition to the standard ones.
11637 *
11638 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11639 *
11640 * @param a_pfnCImpl The pointer to the C routine.
11641 * @param a0 The first extra argument.
11642 * @param a1 The second extra argument.
11643 * @param a2 The third extra argument.
11644 */
11645#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
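/*
 * Unlike the IEM_MC_CALL_CIMPL_n macros, the defer variants stand on their
 * own, without an IEM_MC_BEGIN/IEM_MC_END pair.  Informal sketch; the stats
 * member, mnemonic and worker name are placeholders:
 *
 *  FNIEMOP_DEF(iemOp_SomeInstr)
 *  {
 *      IEMOP_MNEMONIC(some_instr, "someinstr");
 *      return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeInstrWorker);
 *  }
 */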
11646
11647/**
11648 * Calls a FPU assembly implementation taking one visible argument.
11649 *
11650 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11651 * @param a0 The first extra argument.
11652 */
11653#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
11654 do { \
11655 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
11656 } while (0)
11657
11658/**
11659 * Calls a FPU assembly implementation taking two visible arguments.
11660 *
11661 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11662 * @param a0 The first extra argument.
11663 * @param a1 The second extra argument.
11664 */
11665#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
11666 do { \
11667 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11668 } while (0)
11669
11670/**
11671 * Calls a FPU assembly implementation taking three visible arguments.
11672 *
11673 * @param a_pfnAImpl Pointer to the assembly FPU routine.
11674 * @param a0 The first extra argument.
11675 * @param a1 The second extra argument.
11676 * @param a2 The third extra argument.
11677 */
11678#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11679 do { \
11680 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11681 } while (0)
11682
11683#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
11684 do { \
11685 (a_FpuData).FSW = (a_FSW); \
11686 (a_FpuData).r80Result = *(a_pr80Value); \
11687 } while (0)
11688
11689/** Pushes FPU result onto the stack. */
11690#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
11691 iemFpuPushResult(pVCpu, &a_FpuData)
11692/** Pushes FPU result onto the stack and sets the FPUDP. */
11693#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
11694 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
11695
11696/** Replaces ST0 with value 1 and pushes value 2 onto the FPU stack. */
11697#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
11698 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
11699
11700/** Stores FPU result in a stack register. */
11701#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
11702 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
11703/** Stores FPU result in a stack register and pops the stack. */
11704#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
11705 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
11706/** Stores FPU result in a stack register and sets the FPUDP. */
11707#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11708 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11709/** Stores FPU result in a stack register, sets the FPUDP, and pops the
11710 * stack. */
11711#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
11712 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
11713
11714/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
11715#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
11716 iemFpuUpdateOpcodeAndIp(pVCpu)
11717/** Free a stack register (for FFREE and FFREEP). */
11718#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
11719 iemFpuStackFree(pVCpu, a_iStReg)
11720/** Increment the FPU stack pointer. */
11721#define IEM_MC_FPU_STACK_INC_TOP() \
11722 iemFpuStackIncTop(pVCpu)
11723/** Decrement the FPU stack pointer. */
11724#define IEM_MC_FPU_STACK_DEC_TOP() \
11725 iemFpuStackDecTop(pVCpu)
11726
11727/** Updates the FSW, FOP, FPUIP, and FPUCS. */
11728#define IEM_MC_UPDATE_FSW(a_u16FSW) \
11729 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11730/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
11731#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
11732 iemFpuUpdateFSW(pVCpu, a_u16FSW)
11733/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
11734#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11735 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11736/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
11737#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
11738 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
11739/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
11740 * stack. */
11741#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
11742 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
11743/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
11744#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
11745 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
11746
11747/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
11748#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
11749 iemFpuStackUnderflow(pVCpu, a_iStDst)
11750/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11751 * stack. */
11752#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
11753 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
11754/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11755 * FPUDS. */
11756#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11757 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11758/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
11759 * FPUDS. Pops stack. */
11760#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
11761 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
11762/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
11763 * stack twice. */
11764#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
11765 iemFpuStackUnderflowThenPopPop(pVCpu)
11766/** Raises a FPU stack underflow exception for an instruction pushing a result
11767 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
11768#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
11769 iemFpuStackPushUnderflow(pVCpu)
11770/** Raises a FPU stack underflow exception for an instruction pushing a result
11771 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
11772#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
11773 iemFpuStackPushUnderflowTwo(pVCpu)
11774
11775/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11776 * FPUIP, FPUCS and FOP. */
11777#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
11778 iemFpuStackPushOverflow(pVCpu)
11779/** Raises a FPU stack overflow exception as part of a push attempt. Sets
11780 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
11781#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
11782 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
11783/** Prepares for using the FPU state.
11784 * Ensures that we can use the host FPU in the current context (RC+R0).
11785 * Ensures the guest FPU state in the CPUMCTX is up to date. */
11786#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
11787/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
11788#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
11789/** Actualizes the guest FPU state so it can be accessed and modified. */
11790#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
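/*
 * Informal sketch of how the FPU preparation, AIMPL call and result macros
 * above typically combine for a memory-operand load/push style instruction
 * (the AIMPL name and the decode details are placeholders; the IEM_MC_IF_*
 * macros used here are defined further down):
 *
 *      IEM_MC_BEGIN(2, 3);
 *      IEM_MC_LOCAL(RTGCPTR,               GCPtrEffSrc);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_LOCAL(RTFLOAT64U,            r64Val);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG_LOCAL_REF(PCRTFLOAT64U,  pr64Val, r64Val, 1);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      IEM_MC_FETCH_MEM_R64(r64Val, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_FPUREG_IS_EMPTY(7)
 *          IEM_MC_CALL_FPU_AIMPL_2(iemAImpl_SomeLoadWorker_r64, pFpuRes, pr64Val);
 *          IEM_MC_PUSH_FPU_RESULT_MEM_OP(FpuRes, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */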
11791
11792/** Prepares for using the SSE state.
11793 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
11794 * Ensures the guest SSE state in the CPUMCTX is up to date. */
11795#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
11796/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
11797#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
11798/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
11799#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
11800
11801/**
11802 * Calls a MMX assembly implementation taking two visible arguments.
11803 *
11804 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11805 * @param a0 The first extra argument.
11806 * @param a1 The second extra argument.
11807 */
11808#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
11809 do { \
11810 IEM_MC_PREPARE_FPU_USAGE(); \
11811 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11812 } while (0)
11813
11814/**
11815 * Calls a MMX assembly implementation taking three visible arguments.
11816 *
11817 * @param a_pfnAImpl Pointer to the assembly MMX routine.
11818 * @param a0 The first extra argument.
11819 * @param a1 The second extra argument.
11820 * @param a2 The third extra argument.
11821 */
11822#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11823 do { \
11824 IEM_MC_PREPARE_FPU_USAGE(); \
11825 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11826 } while (0)
11827
11828
11829/**
11830 * Calls a SSE assembly implementation taking two visible arguments.
11831 *
11832 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11833 * @param a0 The first extra argument.
11834 * @param a1 The second extra argument.
11835 */
11836#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
11837 do { \
11838 IEM_MC_PREPARE_SSE_USAGE(); \
11839 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
11840 } while (0)
11841
11842/**
11843 * Calls a SSE assembly implementation taking three visible arguments.
11844 *
11845 * @param a_pfnAImpl Pointer to the assembly SSE routine.
11846 * @param a0 The first extra argument.
11847 * @param a1 The second extra argument.
11848 * @param a2 The third extra argument.
11849 */
11850#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
11851 do { \
11852 IEM_MC_PREPARE_SSE_USAGE(); \
11853 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
11854 } while (0)
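/*
 * Informal sketch of a register-form SSE binary op built from the helpers
 * above; the worker name is a placeholder for whatever IEMAllAImpl provides
 * and bRm is assumed to come from the decoder.
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(PRTUINT128U,  pDst, 0);
 *      IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *      IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *      IEM_MC_PREPARE_SSE_USAGE();
 *      IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_CALL_SSE_AIMPL_2(iemAImpl_SomeSseWorker_u128, pDst, pSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */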
11855
11856/** @note Not for IOPL or IF testing. */
11857#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
11858/** @note Not for IOPL or IF testing. */
11859#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
11860/** @note Not for IOPL or IF testing. */
11861#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
11862/** @note Not for IOPL or IF testing. */
11863#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
11864/** @note Not for IOPL or IF testing. */
11865#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
11866 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11867 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11868/** @note Not for IOPL or IF testing. */
11869#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
11870 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11871 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11872/** @note Not for IOPL or IF testing. */
11873#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
11874 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11875 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11876 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11877/** @note Not for IOPL or IF testing. */
11878#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
11879 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
11880 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
11881 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
11882#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
11883#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
11884#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
11885/** @note Not for IOPL or IF testing. */
11886#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11887 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11888 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11889/** @note Not for IOPL or IF testing. */
11890#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11891 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11892 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11893/** @note Not for IOPL or IF testing. */
11894#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
11895 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11896 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11897/** @note Not for IOPL or IF testing. */
11898#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11899 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
11900 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11901/** @note Not for IOPL or IF testing. */
11902#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11903 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
11904 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11905/** @note Not for IOPL or IF testing. */
11906#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
11907 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
11908 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
11909#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
11910#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
11911
11912#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
11913 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
11914#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
11915 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
11916#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
11917 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
11918#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
11919 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
11920#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
11921 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
11922#define IEM_MC_IF_FCW_IM() \
11923 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
11924
11925#define IEM_MC_ELSE() } else {
11926#define IEM_MC_ENDIF() } do {} while (0)
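/*
 * Illustrative use of the conditional blocks above (a sketch, not code from
 * this file): a conditional branch decoder such as jbe/jna Jb pairs
 * IEM_MC_IF_EFL_ANY_BITS_SET with IEM_MC_ELSE/IEM_MC_ENDIF roughly like this,
 * assuming i8Imm holds the previously fetched branch displacement:
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_ANY_BITS_SET(X86_EFL_CF | X86_EFL_ZF) {
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      } IEM_MC_ELSE() {
 *          IEM_MC_ADVANCE_RIP();
 *      } IEM_MC_ENDIF();
 *      IEM_MC_END();
 *
 * Since the IF/ELSE/ENDIF macros expand to plain if/else statements, each
 * opener ends with '{' and IEM_MC_ENDIF() supplies the final closing brace.
 */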
11927
11928/** @} */
11929
11930
11931/** @name Opcode Debug Helpers.
11932 * @{
11933 */
11934#ifdef VBOX_WITH_STATISTICS
11935# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
11936#else
11937# define IEMOP_INC_STATS(a_Stats) do { } while (0)
11938#endif
11939
11940#ifdef DEBUG
11941# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
11942 do { \
11943 IEMOP_INC_STATS(a_Stats); \
11944 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
11945 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
11946 } while (0)
11947
11948# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
11949 do { \
11950 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11951 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11952 (void)RT_CONCAT(OP_,a_Upper); \
11953 (void)(a_fDisHints); \
11954 (void)(a_fIemHints); \
11955 } while (0)
11956
11957# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
11958 do { \
11959 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11960 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11961 (void)RT_CONCAT(OP_,a_Upper); \
11962 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11963 (void)(a_fDisHints); \
11964 (void)(a_fIemHints); \
11965 } while (0)
11966
11967# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
11968 do { \
11969 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11970 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11971 (void)RT_CONCAT(OP_,a_Upper); \
11972 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11973 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11974 (void)(a_fDisHints); \
11975 (void)(a_fIemHints); \
11976 } while (0)
11977
11978# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
11979 do { \
11980 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11981 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11982 (void)RT_CONCAT(OP_,a_Upper); \
11983 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11984 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11985 (void)RT_CONCAT(OP_PARM_,a_Op3); \
11986 (void)(a_fDisHints); \
11987 (void)(a_fIemHints); \
11988 } while (0)
11989
11990# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
11991 do { \
11992 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
11993 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
11994 (void)RT_CONCAT(OP_,a_Upper); \
11995 (void)RT_CONCAT(OP_PARM_,a_Op1); \
11996 (void)RT_CONCAT(OP_PARM_,a_Op2); \
11997 (void)RT_CONCAT(OP_PARM_,a_Op3); \
11998 (void)RT_CONCAT(OP_PARM_,a_Op4); \
11999 (void)(a_fDisHints); \
12000 (void)(a_fIemHints); \
12001 } while (0)
12002
12003#else
12004# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12005
12006# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12007 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12008# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12009 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12010# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12011 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12012# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12013 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12014# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12015 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12016
12017#endif
12018
12019#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12020 IEMOP_MNEMONIC0EX(a_Lower, \
12021 #a_Lower, \
12022 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12023#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12024 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12025 #a_Lower " " #a_Op1, \
12026 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12027#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12028 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12029 #a_Lower " " #a_Op1 "," #a_Op2, \
12030 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12031#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12032 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12033 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12034 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12035#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12036 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12037 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12038 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
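/*
 * Expansion sketch for the IEMOP_MNEMONIC wrappers above (the hint arguments
 * are illustrative): a decoder writing
 *
 *      IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, 0, 0);
 *
 * bumps the statistics member 'add_Eb_Gb' (built via RT_CONCAT5) and, in
 * DEBUG builds, logs the decode as "add Eb,Gb" together with CS:RIP and any
 * lock prefix through IEMOP_MNEMONIC/Log4.  The form, opcode and parameter
 * arguments are only consumed as compile-time checks ((void)RT_CONCAT(...))
 * in debug builds and ignored otherwise, so misspelling them fails to compile
 * without generating any code.
 */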
12039
12040/** @} */
12041
12042
12043/** @name Opcode Helpers.
12044 * @{
12045 */
12046
12047#ifdef IN_RING3
12048# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12049 do { \
12050 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12051 else \
12052 { \
12053 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12054 return IEMOP_RAISE_INVALID_OPCODE(); \
12055 } \
12056 } while (0)
12057#else
12058# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12059 do { \
12060 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12061 else return IEMOP_RAISE_INVALID_OPCODE(); \
12062 } while (0)
12063#endif
12064
12065/** The instruction requires a 186 or later. */
12066#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12067# define IEMOP_HLP_MIN_186() do { } while (0)
12068#else
12069# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12070#endif
12071
12072/** The instruction requires a 286 or later. */
12073#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12074# define IEMOP_HLP_MIN_286() do { } while (0)
12075#else
12076# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12077#endif
12078
12079/** The instruction requires a 386 or later. */
12080#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12081# define IEMOP_HLP_MIN_386() do { } while (0)
12082#else
12083# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12084#endif
12085
12086/** The instruction requires a 386 or later if the given expression is true. */
12087#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12088# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12089#else
12090# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12091#endif
12092
12093/** The instruction requires a 486 or later. */
12094#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12095# define IEMOP_HLP_MIN_486() do { } while (0)
12096#else
12097# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12098#endif
12099
12100/** The instruction requires a Pentium (586) or later. */
12101#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12102# define IEMOP_HLP_MIN_586() do { } while (0)
12103#else
12104# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12105#endif
12106
12107/** The instruction requires a PentiumPro (686) or later. */
12108#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12109# define IEMOP_HLP_MIN_686() do { } while (0)
12110#else
12111# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12112#endif
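/*
 * Usage sketch for the minimum-CPU helpers above: an opcode function simply
 * drops one of these into its decode path, e.g.
 *
 *      IEMOP_HLP_MIN_186();
 *
 * If IEM_CFG_TARGET_CPU already guarantees the required level the macro
 * compiles to nothing; otherwise it expands to the runtime IEMOP_HLP_MIN_CPU
 * check and raises an invalid opcode exception when the target CPU is older.
 */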
12113
12114
12115/** The instruction raises an \#UD in real and V8086 mode. */
12116#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12117 do \
12118 { \
12119 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12120 else return IEMOP_RAISE_INVALID_OPCODE(); \
12121 } while (0)
12122
12123/** The instruction is not available in 64-bit mode; throws \#UD if we're in
12124 * 64-bit mode. */
12125#define IEMOP_HLP_NO_64BIT() \
12126 do \
12127 { \
12128 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12129 return IEMOP_RAISE_INVALID_OPCODE(); \
12130 } while (0)
12131
12132/** The instruction is only available in 64-bit mode; throws \#UD if we're not in
12133 * 64-bit mode. */
12134#define IEMOP_HLP_ONLY_64BIT() \
12135 do \
12136 { \
12137 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12138 return IEMOP_RAISE_INVALID_OPCODE(); \
12139 } while (0)
12140
12141/** The instruction defaults to 64-bit operand size in 64-bit mode. */
12142#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12143 do \
12144 { \
12145 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12146 iemRecalEffOpSize64Default(pVCpu); \
12147 } while (0)
12148
12149/** The instruction has 64-bit operand size in 64-bit mode. */
12150#define IEMOP_HLP_64BIT_OP_SIZE() \
12151 do \
12152 { \
12153 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12154 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12155 } while (0)
12156
12157/** Only a REX prefix immediately preceding the first opcode byte takes
12158 * effect. This macro helps ensure this as well as log bad guest code. */
12159#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12160 do \
12161 { \
12162 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12163 { \
12164 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12165 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12166 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12167 pVCpu->iem.s.uRexB = 0; \
12168 pVCpu->iem.s.uRexIndex = 0; \
12169 pVCpu->iem.s.uRexReg = 0; \
12170 iemRecalEffOpSize(pVCpu); \
12171 } \
12172 } while (0)
12173
12174/**
12175 * Done decoding.
12176 */
12177#define IEMOP_HLP_DONE_DECODING() \
12178 do \
12179 { \
12180 /*nothing for now, maybe later... */ \
12181 } while (0)
12182
12183/**
12184 * Done decoding, raise \#UD exception if lock prefix present.
12185 */
12186#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12187 do \
12188 { \
12189 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12190 { /* likely */ } \
12191 else \
12192 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12193 } while (0)
12194
12195#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12196 do \
12197 { \
12198 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12199 { /* likely */ } \
12200 else \
12201 { \
12202 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12203 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12204 } \
12205 } while (0)
12206#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12207 do \
12208 { \
12209 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12210 { /* likely */ } \
12211 else \
12212 { \
12213 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12214 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12215 } \
12216 } while (0)
12217
12218/**
12219 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12220 * are present.
12221 */
12222#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12223 do \
12224 { \
12225 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12226 { /* likely */ } \
12227 else \
12228 return IEMOP_RAISE_INVALID_OPCODE(); \
12229 } while (0)
12230
12231
12232/**
12233 * Done decoding VEX.
12234 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, or if
12235 * we're in real or v8086 mode.
12236 */
12237#define IEMOP_HLP_DONE_VEX_DECODING() \
12238 do \
12239 { \
12240 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12241 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12242 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12243 { /* likely */ } \
12244 else \
12245 return IEMOP_RAISE_INVALID_OPCODE(); \
12246 } while (0)
12247
12248/**
12249 * Done decoding VEX, no V, no L.
12250 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12251 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12252 */
12253#define IEMOP_HLP_DONE_VEX_DECODING_L_ZERO_NO_VVV() \
12254 do \
12255 { \
12256 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12257 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12258 && pVCpu->iem.s.uVexLength == 0 \
12259 && pVCpu->iem.s.uVex3rdReg == 0 \
12260 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12261 { /* likely */ } \
12262 else \
12263 return IEMOP_RAISE_INVALID_OPCODE(); \
12264 } while (0)
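/*
 * Both VEX helpers enforce the architectural rule that VEX-encoded
 * instructions must not be combined with legacy LOCK/66/F2/F3 or REX prefixes
 * and are undefined in real and v8086 mode.  The L_ZERO_NO_VVV variant
 * additionally requires VEX.L=0 and an unused VEX.vvvv field (the field is
 * stored inverted, so the all-ones encoding decodes to uVex3rdReg == 0).
 */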
12265
12266#ifdef VBOX_WITH_NESTED_HWVIRT
12267/** Check and handle SVM nested-guest control & instruction intercepts. */
12268# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12269 do \
12270 { \
12271 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12272 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12273 } while (0)
12274
12275/** Check and handle SVM nested-guest CR read intercepts. */
12276# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12277 do \
12278 { \
12279 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12280 IEM_RETURN_SVM_NST_GST_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12281 } while (0)
12282
12283#else
12284# define IEMOP_HLP_SVM_CTRL_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12285# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12286
12287#endif /* VBOX_WITH_NESTED_HWVIRT */
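/*
 * Usage sketch (the argument names are illustrative): an instruction that can
 * be intercepted by a nested SVM guest, say RDTSC, would start with something
 * along the lines of
 *
 *      IEMOP_HLP_SVM_CTRL_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC, 0, 0);
 *
 * causing a VM-exit to be delivered to the nested guest when the intercept is
 * set.  Without VBOX_WITH_NESTED_HWVIRT both helpers are empty statements, so
 * call sites need no additional #ifdef-ing.
 */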
12288
12289
12290/**
12291 * Calculates the effective address of a ModR/M memory operand.
12292 *
12293 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12294 *
12295 * @return Strict VBox status code.
12296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12297 * @param bRm The ModRM byte.
12298 * @param cbImm The size of any immediate following the
12299 * effective address opcode bytes. Important for
12300 * RIP relative addressing.
12301 * @param pGCPtrEff Where to return the effective address.
12302 */
12303IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12304{
12305 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12306 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12307# define SET_SS_DEF() \
12308 do \
12309 { \
12310 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12311 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12312 } while (0)
12313
12314 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12315 {
12316/** @todo Check the effective address size crap! */
12317 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12318 {
12319 uint16_t u16EffAddr;
12320
12321 /* Handle the disp16 form with no registers first. */
12322 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12323 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12324 else
12325 {
12326 /* Get the displacement. */
12327 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12328 {
12329 case 0: u16EffAddr = 0; break;
12330 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12331 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12332 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12333 }
12334
12335 /* Add the base and index registers to the disp. */
12336 switch (bRm & X86_MODRM_RM_MASK)
12337 {
12338 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12339 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12340 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12341 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12342 case 4: u16EffAddr += pCtx->si; break;
12343 case 5: u16EffAddr += pCtx->di; break;
12344 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12345 case 7: u16EffAddr += pCtx->bx; break;
12346 }
12347 }
12348
12349 *pGCPtrEff = u16EffAddr;
12350 }
12351 else
12352 {
12353 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12354 uint32_t u32EffAddr;
12355
12356 /* Handle the disp32 form with no registers first. */
12357 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12358 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12359 else
12360 {
12361 /* Get the register (or SIB) value. */
12362 switch ((bRm & X86_MODRM_RM_MASK))
12363 {
12364 case 0: u32EffAddr = pCtx->eax; break;
12365 case 1: u32EffAddr = pCtx->ecx; break;
12366 case 2: u32EffAddr = pCtx->edx; break;
12367 case 3: u32EffAddr = pCtx->ebx; break;
12368 case 4: /* SIB */
12369 {
12370 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12371
12372 /* Get the index and scale it. */
12373 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12374 {
12375 case 0: u32EffAddr = pCtx->eax; break;
12376 case 1: u32EffAddr = pCtx->ecx; break;
12377 case 2: u32EffAddr = pCtx->edx; break;
12378 case 3: u32EffAddr = pCtx->ebx; break;
12379 case 4: u32EffAddr = 0; /*none */ break;
12380 case 5: u32EffAddr = pCtx->ebp; break;
12381 case 6: u32EffAddr = pCtx->esi; break;
12382 case 7: u32EffAddr = pCtx->edi; break;
12383 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12384 }
12385 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12386
12387 /* add base */
12388 switch (bSib & X86_SIB_BASE_MASK)
12389 {
12390 case 0: u32EffAddr += pCtx->eax; break;
12391 case 1: u32EffAddr += pCtx->ecx; break;
12392 case 2: u32EffAddr += pCtx->edx; break;
12393 case 3: u32EffAddr += pCtx->ebx; break;
12394 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12395 case 5:
12396 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12397 {
12398 u32EffAddr += pCtx->ebp;
12399 SET_SS_DEF();
12400 }
12401 else
12402 {
12403 uint32_t u32Disp;
12404 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12405 u32EffAddr += u32Disp;
12406 }
12407 break;
12408 case 6: u32EffAddr += pCtx->esi; break;
12409 case 7: u32EffAddr += pCtx->edi; break;
12410 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12411 }
12412 break;
12413 }
12414 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12415 case 6: u32EffAddr = pCtx->esi; break;
12416 case 7: u32EffAddr = pCtx->edi; break;
12417 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12418 }
12419
12420 /* Get and add the displacement. */
12421 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12422 {
12423 case 0:
12424 break;
12425 case 1:
12426 {
12427 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12428 u32EffAddr += i8Disp;
12429 break;
12430 }
12431 case 2:
12432 {
12433 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12434 u32EffAddr += u32Disp;
12435 break;
12436 }
12437 default:
12438 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12439 }
12440
12441 }
12442 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12443 *pGCPtrEff = u32EffAddr;
12444 else
12445 {
12446 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12447 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12448 }
12449 }
12450 }
12451 else
12452 {
12453 uint64_t u64EffAddr;
12454
12455 /* Handle the rip+disp32 form with no registers first. */
12456 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12457 {
12458 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12459 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12460 }
12461 else
12462 {
12463 /* Get the register (or SIB) value. */
12464 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12465 {
12466 case 0: u64EffAddr = pCtx->rax; break;
12467 case 1: u64EffAddr = pCtx->rcx; break;
12468 case 2: u64EffAddr = pCtx->rdx; break;
12469 case 3: u64EffAddr = pCtx->rbx; break;
12470 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12471 case 6: u64EffAddr = pCtx->rsi; break;
12472 case 7: u64EffAddr = pCtx->rdi; break;
12473 case 8: u64EffAddr = pCtx->r8; break;
12474 case 9: u64EffAddr = pCtx->r9; break;
12475 case 10: u64EffAddr = pCtx->r10; break;
12476 case 11: u64EffAddr = pCtx->r11; break;
12477 case 13: u64EffAddr = pCtx->r13; break;
12478 case 14: u64EffAddr = pCtx->r14; break;
12479 case 15: u64EffAddr = pCtx->r15; break;
12480 /* SIB */
12481 case 4:
12482 case 12:
12483 {
12484 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12485
12486 /* Get the index and scale it. */
12487 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12488 {
12489 case 0: u64EffAddr = pCtx->rax; break;
12490 case 1: u64EffAddr = pCtx->rcx; break;
12491 case 2: u64EffAddr = pCtx->rdx; break;
12492 case 3: u64EffAddr = pCtx->rbx; break;
12493 case 4: u64EffAddr = 0; /*none */ break;
12494 case 5: u64EffAddr = pCtx->rbp; break;
12495 case 6: u64EffAddr = pCtx->rsi; break;
12496 case 7: u64EffAddr = pCtx->rdi; break;
12497 case 8: u64EffAddr = pCtx->r8; break;
12498 case 9: u64EffAddr = pCtx->r9; break;
12499 case 10: u64EffAddr = pCtx->r10; break;
12500 case 11: u64EffAddr = pCtx->r11; break;
12501 case 12: u64EffAddr = pCtx->r12; break;
12502 case 13: u64EffAddr = pCtx->r13; break;
12503 case 14: u64EffAddr = pCtx->r14; break;
12504 case 15: u64EffAddr = pCtx->r15; break;
12505 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12506 }
12507 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12508
12509 /* add base */
12510 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12511 {
12512 case 0: u64EffAddr += pCtx->rax; break;
12513 case 1: u64EffAddr += pCtx->rcx; break;
12514 case 2: u64EffAddr += pCtx->rdx; break;
12515 case 3: u64EffAddr += pCtx->rbx; break;
12516 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
12517 case 6: u64EffAddr += pCtx->rsi; break;
12518 case 7: u64EffAddr += pCtx->rdi; break;
12519 case 8: u64EffAddr += pCtx->r8; break;
12520 case 9: u64EffAddr += pCtx->r9; break;
12521 case 10: u64EffAddr += pCtx->r10; break;
12522 case 11: u64EffAddr += pCtx->r11; break;
12523 case 12: u64EffAddr += pCtx->r12; break;
12524 case 14: u64EffAddr += pCtx->r14; break;
12525 case 15: u64EffAddr += pCtx->r15; break;
12526 /* complicated encodings */
12527 case 5:
12528 case 13:
12529 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12530 {
12531 if (!pVCpu->iem.s.uRexB)
12532 {
12533 u64EffAddr += pCtx->rbp;
12534 SET_SS_DEF();
12535 }
12536 else
12537 u64EffAddr += pCtx->r13;
12538 }
12539 else
12540 {
12541 uint32_t u32Disp;
12542 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12543 u64EffAddr += (int32_t)u32Disp;
12544 }
12545 break;
12546 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12547 }
12548 break;
12549 }
12550 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12551 }
12552
12553 /* Get and add the displacement. */
12554 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12555 {
12556 case 0:
12557 break;
12558 case 1:
12559 {
12560 int8_t i8Disp;
12561 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12562 u64EffAddr += i8Disp;
12563 break;
12564 }
12565 case 2:
12566 {
12567 uint32_t u32Disp;
12568 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12569 u64EffAddr += (int32_t)u32Disp;
12570 break;
12571 }
12572 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12573 }
12574
12575 }
12576
12577 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12578 *pGCPtrEff = u64EffAddr;
12579 else
12580 {
12581 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12582 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12583 }
12584 }
12585
12586 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12587 return VINF_SUCCESS;
12588}
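/*
 * Worked example for the 32-bit path above: bRm=0x58 decodes as mod=01,
 * reg=011, rm=000, so the helper fetches a single disp8 byte and returns
 * EAX + disp8; with rm=100 it would fetch a SIB byte first and combine scaled
 * index, base and displacement.  In 64-bit mode the mod=00/rm=101 encoding is
 * RIP-relative: the displacement is relative to the next instruction, which
 * is why the size of any trailing immediate (cbImm) has to be added on top of
 * the bytes decoded so far.
 */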
12589
12590
12591/**
12592 * Calculates the effective address of a ModR/M memory operand.
12593 *
12594 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12595 *
12596 * @return Strict VBox status code.
12597 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12598 * @param bRm The ModRM byte.
12599 * @param cbImm The size of any immediate following the
12600 * effective address opcode bytes. Important for
12601 * RIP relative addressing.
12602 * @param pGCPtrEff Where to return the effective address.
12603 * @param offRsp Additional displacement applied when RSP/ESP is used as the base register.
12604 */
12605IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
12606{
12607 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12608 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12609# define SET_SS_DEF() \
12610 do \
12611 { \
12612 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12613 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12614 } while (0)
12615
12616 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12617 {
12618/** @todo Check the effective address size crap! */
12619 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12620 {
12621 uint16_t u16EffAddr;
12622
12623 /* Handle the disp16 form with no registers first. */
12624 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12625 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12626 else
12627 {
12628 /* Get the displacement. */
12629 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12630 {
12631 case 0: u16EffAddr = 0; break;
12632 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12633 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12634 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12635 }
12636
12637 /* Add the base and index registers to the disp. */
12638 switch (bRm & X86_MODRM_RM_MASK)
12639 {
12640 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12641 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12642 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12643 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12644 case 4: u16EffAddr += pCtx->si; break;
12645 case 5: u16EffAddr += pCtx->di; break;
12646 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12647 case 7: u16EffAddr += pCtx->bx; break;
12648 }
12649 }
12650
12651 *pGCPtrEff = u16EffAddr;
12652 }
12653 else
12654 {
12655 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12656 uint32_t u32EffAddr;
12657
12658 /* Handle the disp32 form with no registers first. */
12659 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12660 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12661 else
12662 {
12663 /* Get the register (or SIB) value. */
12664 switch ((bRm & X86_MODRM_RM_MASK))
12665 {
12666 case 0: u32EffAddr = pCtx->eax; break;
12667 case 1: u32EffAddr = pCtx->ecx; break;
12668 case 2: u32EffAddr = pCtx->edx; break;
12669 case 3: u32EffAddr = pCtx->ebx; break;
12670 case 4: /* SIB */
12671 {
12672 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12673
12674 /* Get the index and scale it. */
12675 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12676 {
12677 case 0: u32EffAddr = pCtx->eax; break;
12678 case 1: u32EffAddr = pCtx->ecx; break;
12679 case 2: u32EffAddr = pCtx->edx; break;
12680 case 3: u32EffAddr = pCtx->ebx; break;
12681 case 4: u32EffAddr = 0; /*none */ break;
12682 case 5: u32EffAddr = pCtx->ebp; break;
12683 case 6: u32EffAddr = pCtx->esi; break;
12684 case 7: u32EffAddr = pCtx->edi; break;
12685 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12686 }
12687 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12688
12689 /* add base */
12690 switch (bSib & X86_SIB_BASE_MASK)
12691 {
12692 case 0: u32EffAddr += pCtx->eax; break;
12693 case 1: u32EffAddr += pCtx->ecx; break;
12694 case 2: u32EffAddr += pCtx->edx; break;
12695 case 3: u32EffAddr += pCtx->ebx; break;
12696 case 4:
12697 u32EffAddr += pCtx->esp + offRsp;
12698 SET_SS_DEF();
12699 break;
12700 case 5:
12701 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12702 {
12703 u32EffAddr += pCtx->ebp;
12704 SET_SS_DEF();
12705 }
12706 else
12707 {
12708 uint32_t u32Disp;
12709 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12710 u32EffAddr += u32Disp;
12711 }
12712 break;
12713 case 6: u32EffAddr += pCtx->esi; break;
12714 case 7: u32EffAddr += pCtx->edi; break;
12715 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12716 }
12717 break;
12718 }
12719 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12720 case 6: u32EffAddr = pCtx->esi; break;
12721 case 7: u32EffAddr = pCtx->edi; break;
12722 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12723 }
12724
12725 /* Get and add the displacement. */
12726 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12727 {
12728 case 0:
12729 break;
12730 case 1:
12731 {
12732 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12733 u32EffAddr += i8Disp;
12734 break;
12735 }
12736 case 2:
12737 {
12738 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12739 u32EffAddr += u32Disp;
12740 break;
12741 }
12742 default:
12743 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12744 }
12745
12746 }
12747 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12748 *pGCPtrEff = u32EffAddr;
12749 else
12750 {
12751 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12752 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12753 }
12754 }
12755 }
12756 else
12757 {
12758 uint64_t u64EffAddr;
12759
12760 /* Handle the rip+disp32 form with no registers first. */
12761 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12762 {
12763 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12764 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12765 }
12766 else
12767 {
12768 /* Get the register (or SIB) value. */
12769 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12770 {
12771 case 0: u64EffAddr = pCtx->rax; break;
12772 case 1: u64EffAddr = pCtx->rcx; break;
12773 case 2: u64EffAddr = pCtx->rdx; break;
12774 case 3: u64EffAddr = pCtx->rbx; break;
12775 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12776 case 6: u64EffAddr = pCtx->rsi; break;
12777 case 7: u64EffAddr = pCtx->rdi; break;
12778 case 8: u64EffAddr = pCtx->r8; break;
12779 case 9: u64EffAddr = pCtx->r9; break;
12780 case 10: u64EffAddr = pCtx->r10; break;
12781 case 11: u64EffAddr = pCtx->r11; break;
12782 case 13: u64EffAddr = pCtx->r13; break;
12783 case 14: u64EffAddr = pCtx->r14; break;
12784 case 15: u64EffAddr = pCtx->r15; break;
12785 /* SIB */
12786 case 4:
12787 case 12:
12788 {
12789 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12790
12791 /* Get the index and scale it. */
12792 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12793 {
12794 case 0: u64EffAddr = pCtx->rax; break;
12795 case 1: u64EffAddr = pCtx->rcx; break;
12796 case 2: u64EffAddr = pCtx->rdx; break;
12797 case 3: u64EffAddr = pCtx->rbx; break;
12798 case 4: u64EffAddr = 0; /*none */ break;
12799 case 5: u64EffAddr = pCtx->rbp; break;
12800 case 6: u64EffAddr = pCtx->rsi; break;
12801 case 7: u64EffAddr = pCtx->rdi; break;
12802 case 8: u64EffAddr = pCtx->r8; break;
12803 case 9: u64EffAddr = pCtx->r9; break;
12804 case 10: u64EffAddr = pCtx->r10; break;
12805 case 11: u64EffAddr = pCtx->r11; break;
12806 case 12: u64EffAddr = pCtx->r12; break;
12807 case 13: u64EffAddr = pCtx->r13; break;
12808 case 14: u64EffAddr = pCtx->r14; break;
12809 case 15: u64EffAddr = pCtx->r15; break;
12810 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12811 }
12812 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12813
12814 /* add base */
12815 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12816 {
12817 case 0: u64EffAddr += pCtx->rax; break;
12818 case 1: u64EffAddr += pCtx->rcx; break;
12819 case 2: u64EffAddr += pCtx->rdx; break;
12820 case 3: u64EffAddr += pCtx->rbx; break;
12821 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
12822 case 6: u64EffAddr += pCtx->rsi; break;
12823 case 7: u64EffAddr += pCtx->rdi; break;
12824 case 8: u64EffAddr += pCtx->r8; break;
12825 case 9: u64EffAddr += pCtx->r9; break;
12826 case 10: u64EffAddr += pCtx->r10; break;
12827 case 11: u64EffAddr += pCtx->r11; break;
12828 case 12: u64EffAddr += pCtx->r12; break;
12829 case 14: u64EffAddr += pCtx->r14; break;
12830 case 15: u64EffAddr += pCtx->r15; break;
12831 /* complicated encodings */
12832 case 5:
12833 case 13:
12834 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12835 {
12836 if (!pVCpu->iem.s.uRexB)
12837 {
12838 u64EffAddr += pCtx->rbp;
12839 SET_SS_DEF();
12840 }
12841 else
12842 u64EffAddr += pCtx->r13;
12843 }
12844 else
12845 {
12846 uint32_t u32Disp;
12847 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12848 u64EffAddr += (int32_t)u32Disp;
12849 }
12850 break;
12851 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12852 }
12853 break;
12854 }
12855 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12856 }
12857
12858 /* Get and add the displacement. */
12859 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12860 {
12861 case 0:
12862 break;
12863 case 1:
12864 {
12865 int8_t i8Disp;
12866 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12867 u64EffAddr += i8Disp;
12868 break;
12869 }
12870 case 2:
12871 {
12872 uint32_t u32Disp;
12873 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12874 u64EffAddr += (int32_t)u32Disp;
12875 break;
12876 }
12877 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
12878 }
12879
12880 }
12881
12882 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
12883 *pGCPtrEff = u64EffAddr;
12884 else
12885 {
12886 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12887 *pGCPtrEff = u64EffAddr & UINT32_MAX;
12888 }
12889 }
12890
12891 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
12892 return VINF_SUCCESS;
12893}
12894
12895
12896#ifdef IEM_WITH_SETJMP
12897/**
12898 * Calculates the effective address of a ModR/M memory operand.
12899 *
12900 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12901 *
12902 * May longjmp on internal error.
12903 *
12904 * @return The effective address.
12905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12906 * @param bRm The ModRM byte.
12907 * @param cbImm The size of any immediate following the
12908 * effective address opcode bytes. Important for
12909 * RIP relative addressing.
12910 */
12911IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
12912{
12913 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
12914 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12915# define SET_SS_DEF() \
12916 do \
12917 { \
12918 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12919 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12920 } while (0)
12921
12922 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12923 {
12924/** @todo Check the effective address size crap! */
12925 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12926 {
12927 uint16_t u16EffAddr;
12928
12929 /* Handle the disp16 form with no registers first. */
12930 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12931 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12932 else
12933 {
12934 /* Get the displacement. */
12935 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12936 {
12937 case 0: u16EffAddr = 0; break;
12938 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12939 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12940 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
12941 }
12942
12943 /* Add the base and index registers to the disp. */
12944 switch (bRm & X86_MODRM_RM_MASK)
12945 {
12946 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12947 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12948 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12949 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12950 case 4: u16EffAddr += pCtx->si; break;
12951 case 5: u16EffAddr += pCtx->di; break;
12952 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12953 case 7: u16EffAddr += pCtx->bx; break;
12954 }
12955 }
12956
12957 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
12958 return u16EffAddr;
12959 }
12960
12961 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12962 uint32_t u32EffAddr;
12963
12964 /* Handle the disp32 form with no registers first. */
12965 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12966 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12967 else
12968 {
12969 /* Get the register (or SIB) value. */
12970 switch ((bRm & X86_MODRM_RM_MASK))
12971 {
12972 case 0: u32EffAddr = pCtx->eax; break;
12973 case 1: u32EffAddr = pCtx->ecx; break;
12974 case 2: u32EffAddr = pCtx->edx; break;
12975 case 3: u32EffAddr = pCtx->ebx; break;
12976 case 4: /* SIB */
12977 {
12978 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12979
12980 /* Get the index and scale it. */
12981 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12982 {
12983 case 0: u32EffAddr = pCtx->eax; break;
12984 case 1: u32EffAddr = pCtx->ecx; break;
12985 case 2: u32EffAddr = pCtx->edx; break;
12986 case 3: u32EffAddr = pCtx->ebx; break;
12987 case 4: u32EffAddr = 0; /*none */ break;
12988 case 5: u32EffAddr = pCtx->ebp; break;
12989 case 6: u32EffAddr = pCtx->esi; break;
12990 case 7: u32EffAddr = pCtx->edi; break;
12991 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
12992 }
12993 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12994
12995 /* add base */
12996 switch (bSib & X86_SIB_BASE_MASK)
12997 {
12998 case 0: u32EffAddr += pCtx->eax; break;
12999 case 1: u32EffAddr += pCtx->ecx; break;
13000 case 2: u32EffAddr += pCtx->edx; break;
13001 case 3: u32EffAddr += pCtx->ebx; break;
13002 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13003 case 5:
13004 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13005 {
13006 u32EffAddr += pCtx->ebp;
13007 SET_SS_DEF();
13008 }
13009 else
13010 {
13011 uint32_t u32Disp;
13012 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13013 u32EffAddr += u32Disp;
13014 }
13015 break;
13016 case 6: u32EffAddr += pCtx->esi; break;
13017 case 7: u32EffAddr += pCtx->edi; break;
13018 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13019 }
13020 break;
13021 }
13022 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13023 case 6: u32EffAddr = pCtx->esi; break;
13024 case 7: u32EffAddr = pCtx->edi; break;
13025 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13026 }
13027
13028 /* Get and add the displacement. */
13029 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13030 {
13031 case 0:
13032 break;
13033 case 1:
13034 {
13035 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13036 u32EffAddr += i8Disp;
13037 break;
13038 }
13039 case 2:
13040 {
13041 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13042 u32EffAddr += u32Disp;
13043 break;
13044 }
13045 default:
13046 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13047 }
13048 }
13049
13050 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13051 {
13052 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13053 return u32EffAddr;
13054 }
13055 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13056 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13057 return u32EffAddr & UINT16_MAX;
13058 }
13059
13060 uint64_t u64EffAddr;
13061
13062 /* Handle the rip+disp32 form with no registers first. */
13063 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13064 {
13065 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13066 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13067 }
13068 else
13069 {
13070 /* Get the register (or SIB) value. */
13071 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13072 {
13073 case 0: u64EffAddr = pCtx->rax; break;
13074 case 1: u64EffAddr = pCtx->rcx; break;
13075 case 2: u64EffAddr = pCtx->rdx; break;
13076 case 3: u64EffAddr = pCtx->rbx; break;
13077 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13078 case 6: u64EffAddr = pCtx->rsi; break;
13079 case 7: u64EffAddr = pCtx->rdi; break;
13080 case 8: u64EffAddr = pCtx->r8; break;
13081 case 9: u64EffAddr = pCtx->r9; break;
13082 case 10: u64EffAddr = pCtx->r10; break;
13083 case 11: u64EffAddr = pCtx->r11; break;
13084 case 13: u64EffAddr = pCtx->r13; break;
13085 case 14: u64EffAddr = pCtx->r14; break;
13086 case 15: u64EffAddr = pCtx->r15; break;
13087 /* SIB */
13088 case 4:
13089 case 12:
13090 {
13091 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13092
13093 /* Get the index and scale it. */
13094 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13095 {
13096 case 0: u64EffAddr = pCtx->rax; break;
13097 case 1: u64EffAddr = pCtx->rcx; break;
13098 case 2: u64EffAddr = pCtx->rdx; break;
13099 case 3: u64EffAddr = pCtx->rbx; break;
13100 case 4: u64EffAddr = 0; /*none */ break;
13101 case 5: u64EffAddr = pCtx->rbp; break;
13102 case 6: u64EffAddr = pCtx->rsi; break;
13103 case 7: u64EffAddr = pCtx->rdi; break;
13104 case 8: u64EffAddr = pCtx->r8; break;
13105 case 9: u64EffAddr = pCtx->r9; break;
13106 case 10: u64EffAddr = pCtx->r10; break;
13107 case 11: u64EffAddr = pCtx->r11; break;
13108 case 12: u64EffAddr = pCtx->r12; break;
13109 case 13: u64EffAddr = pCtx->r13; break;
13110 case 14: u64EffAddr = pCtx->r14; break;
13111 case 15: u64EffAddr = pCtx->r15; break;
13112 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13113 }
13114 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13115
13116 /* add base */
13117 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13118 {
13119 case 0: u64EffAddr += pCtx->rax; break;
13120 case 1: u64EffAddr += pCtx->rcx; break;
13121 case 2: u64EffAddr += pCtx->rdx; break;
13122 case 3: u64EffAddr += pCtx->rbx; break;
13123 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13124 case 6: u64EffAddr += pCtx->rsi; break;
13125 case 7: u64EffAddr += pCtx->rdi; break;
13126 case 8: u64EffAddr += pCtx->r8; break;
13127 case 9: u64EffAddr += pCtx->r9; break;
13128 case 10: u64EffAddr += pCtx->r10; break;
13129 case 11: u64EffAddr += pCtx->r11; break;
13130 case 12: u64EffAddr += pCtx->r12; break;
13131 case 14: u64EffAddr += pCtx->r14; break;
13132 case 15: u64EffAddr += pCtx->r15; break;
13133 /* complicated encodings */
13134 case 5:
13135 case 13:
13136 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13137 {
13138 if (!pVCpu->iem.s.uRexB)
13139 {
13140 u64EffAddr += pCtx->rbp;
13141 SET_SS_DEF();
13142 }
13143 else
13144 u64EffAddr += pCtx->r13;
13145 }
13146 else
13147 {
13148 uint32_t u32Disp;
13149 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13150 u64EffAddr += (int32_t)u32Disp;
13151 }
13152 break;
13153 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13154 }
13155 break;
13156 }
13157 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13158 }
13159
13160 /* Get and add the displacement. */
13161 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13162 {
13163 case 0:
13164 break;
13165 case 1:
13166 {
13167 int8_t i8Disp;
13168 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13169 u64EffAddr += i8Disp;
13170 break;
13171 }
13172 case 2:
13173 {
13174 uint32_t u32Disp;
13175 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13176 u64EffAddr += (int32_t)u32Disp;
13177 break;
13178 }
13179 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13180 }
13181
13182 }
13183
13184 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13185 {
13186 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13187 return u64EffAddr;
13188 }
13189 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13190 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13191 return u64EffAddr & UINT32_MAX;
13192}
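/*
 * The setjmp variant above differs from iemOpHlpCalcRmEffAddr only in how it
 * reports trouble: instead of returning a VBOXSTRICTRC it longjmps on the
 * CTX_SUFF(pJmpBuf) jump buffer (the unreachable default cases return
 * RTGCPTR_MAX), so a caller can consume the result directly, e.g. (sketch):
 *
 *      RTGCPTR GCPtrEff = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0);
 *
 * without threading a status code through every decoder path.
 */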
13193#endif /* IEM_WITH_SETJMP */
13194
13195
13196/** @} */
13197
13198
13199
13200/*
13201 * Include the instructions
13202 */
13203#include "IEMAllInstructions.cpp.h"
13204
13205
13206
13207
13208#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
13209
13210/**
13211 * Sets up execution verification mode.
13212 */
13213IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)
13214{
13216 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);
13217
13218 /*
13219 * Always note down the address of the current instruction.
13220 */
13221 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;
13222 pVCpu->iem.s.uOldRip = pOrgCtx->rip;
13223
13224 /*
13225 * Enable verification and/or logging.
13226 */
13227 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */
13228 if ( fNewNoRem
13229 && ( 0
13230#if 0 /* auto enable on first paged protected mode interrupt */
13231 || ( pOrgCtx->eflags.Bits.u1IF
13232 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)
13233 && TRPMHasTrap(pVCpu)
13234 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )
13235#endif
13236#if 0
13237 || ( pOrgCtx->cs == 0x10
13238 && ( pOrgCtx->rip == 0x90119e3e
13239 || pOrgCtx->rip == 0x901d9810))
13240#endif
13241#if 0 /* Auto enable DSL - FPU stuff. */
13242 || ( pOrgCtx->cs == 0x10
13243 && (// pOrgCtx->rip == 0xc02ec07f
13244 //|| pOrgCtx->rip == 0xc02ec082
13245 //|| pOrgCtx->rip == 0xc02ec0c9
13246 0
13247 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )
13248#endif
13249#if 0 /* Auto enable DSL - fstp st0 stuff. */
13250 || (pOrgCtx->cs.Sel == 0x23 && pOrgCtx->rip == 0x804aff7)
13251#endif
13252#if 0
13253 || pOrgCtx->rip == 0x9022bb3a
13254#endif
13255#if 0
13256 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */
13257#endif
13258#if 0
13259 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */
13260 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */
13261#endif
13262#if 0 /* NT4SP1 - later on the blue screen, things go wrong... */
13263 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)
13264 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)
13265 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)
13266#endif
13267#if 0 /* NT4SP1 - xadd early boot. */
13268 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)
13269#endif
13270#if 0 /* NT4SP1 - wrmsr (intel MSR). */
13271 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)
13272#endif
13273#if 0 /* NT4SP1 - cmpxchg (AMD). */
13274 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)
13275#endif
13276#if 0 /* NT4SP1 - fnstsw + 2 (AMD). */
13277 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)
13278#endif
13279#if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */
13280 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)
13281
13282#endif
13283#if 0 /* NT4SP1 - iret to v8086 (executing edlin) */
13284 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)
13285
13286#endif
13287#if 0 /* NT4SP1 - frstor [ecx] */
13288 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)
13289#endif
13290#if 0 /* xxxxxx - All long mode code. */
13291 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)
13292#endif
13293#if 0 /* rep movsq linux 3.7 64-bit boot. */
13294 || (pOrgCtx->rip == 0x0000000000100241)
13295#endif
13296#if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */
13297 || (pOrgCtx->rip == 0x000000000215e240)
13298#endif
13299#if 0 /* DOS's size-overridden iret to v8086. */
13300 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)
13301#endif
13302 )
13303 )
13304 {
13305 RTLogGroupSettings(NULL, "iem.eo.l6.l2");
13306 RTLogFlags(NULL, "enabled");
13307 fNewNoRem = false;
13308 }
13309 if (fNewNoRem != pVCpu->iem.s.fNoRem)
13310 {
13311 pVCpu->iem.s.fNoRem = fNewNoRem;
13312 if (!fNewNoRem)
13313 {
13314 LogAlways(("Enabling verification mode!\n"));
13315 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
13316 }
13317 else
13318 LogAlways(("Disabling verification mode!\n"));
13319 }
13320
13321 /*
13322 * Switch state.
13323 */
13324 if (IEM_VERIFICATION_ENABLED(pVCpu))
13325 {
13326 static CPUMCTX s_DebugCtx; /* Ugly! */
13327
13328 s_DebugCtx = *pOrgCtx;
13329 IEM_GET_CTX(pVCpu) = &s_DebugCtx;
13330 }
13331
13332 /*
13333 * See if there is an interrupt pending in TRPM and inject it if we can.
13334 */
13335 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
13336 if ( pOrgCtx->eflags.Bits.u1IF
13337 && TRPMHasTrap(pVCpu)
13338 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)
13339 {
13340 uint8_t u8TrapNo;
13341 TRPMEVENT enmType;
13342 RTGCUINT uErrCode;
13343 RTGCPTR uCr2;
13344 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
13345 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
13346 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13347 TRPMResetTrap(pVCpu);
13348 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;
13349 }
13350
13351 /*
13352 * Reset the counters.
13353 */
13354 pVCpu->iem.s.cIOReads = 0;
13355 pVCpu->iem.s.cIOWrites = 0;
13356 pVCpu->iem.s.fIgnoreRaxRdx = false;
13357 pVCpu->iem.s.fOverlappingMovs = false;
13358 pVCpu->iem.s.fProblematicMemory = false;
13359 pVCpu->iem.s.fUndefinedEFlags = 0;
13360
13361 if (IEM_VERIFICATION_ENABLED(pVCpu))
13362 {
13363 /*
13364 * Free all verification records.
13365 */
13366 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;
13367 pVCpu->iem.s.pIemEvtRecHead = NULL;
13368 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;
13369 do
13370 {
13371 while (pEvtRec)
13372 {
13373 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;
13374 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;
13375 pVCpu->iem.s.pFreeEvtRec = pEvtRec;
13376 pEvtRec = pNext;
13377 }
13378 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;
13379 pVCpu->iem.s.pOtherEvtRecHead = NULL;
13380 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;
13381 } while (pEvtRec);
13382 }
13383}
13384
13385
13386/**
13387 * Allocate an event record.
13388 * @returns Pointer to a record.
13389 */
13390IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)
13391{
13392 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13393 return NULL;
13394
13395 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;
13396 if (pEvtRec)
13397 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;
13398 else
13399 {
13400 if (!pVCpu->iem.s.ppIemEvtRecNext)
13401 return NULL; /* Too early (fake PCIBIOS), ignore notification. */
13402
13403 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));
13404 if (!pEvtRec)
13405 return NULL;
13406 }
13407 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;
13408 pEvtRec->pNext = NULL;
13409 return pEvtRec;
13410}
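/*
 * Note on the two record lists used below: events generated by IEM itself
 * (for instance the fake I/O port accesses further down) are appended via
 * ppIemEvtRecNext, while the IEMNotify* callbacks, invoked when the same
 * accesses are performed outside IEM, append to ppOtherEvtRecNext.  The two
 * lists can then be compared once the instruction has been executed both
 * ways, and mismatches are reported through the iemVerifyAssert* helpers at
 * the end of this section.
 */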
13411
13412
13413/**
13414 * IOMMMIORead notification.
13415 */
13416VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)
13417{
13418 PVMCPU pVCpu = VMMGetCpu(pVM);
13419 if (!pVCpu)
13420 return;
13421 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13422 if (!pEvtRec)
13423 return;
13424 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;
13425 pEvtRec->u.RamRead.GCPhys = GCPhys;
13426 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;
13427 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13428 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13429}
13430
13431
13432/**
13433 * IOMMMIOWrite notification.
13434 */
13435VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)
13436{
13437 PVMCPU pVCpu = VMMGetCpu(pVM);
13438 if (!pVCpu)
13439 return;
13440 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13441 if (!pEvtRec)
13442 return;
13443 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;
13444 pEvtRec->u.RamWrite.GCPhys = GCPhys;
13445 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;
13446 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);
13447 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);
13448 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);
13449 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);
13450 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13451 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13452}
13453
13454
13455/**
13456 * IOMIOPortRead notification.
13457 */
13458VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)
13459{
13460 PVMCPU pVCpu = VMMGetCpu(pVM);
13461 if (!pVCpu)
13462 return;
13463 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13464 if (!pEvtRec)
13465 return;
13466 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13467 pEvtRec->u.IOPortRead.Port = Port;
13468 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13469 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13470 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13471}
13472
13473/**
13474 * IOMIOPortWrite notification.
13475 */
13476VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13477{
13478 PVMCPU pVCpu = VMMGetCpu(pVM);
13479 if (!pVCpu)
13480 return;
13481 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13482 if (!pEvtRec)
13483 return;
13484 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13485 pEvtRec->u.IOPortWrite.Port = Port;
13486 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13487 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13488 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13489 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13490}
13491
13492
13493VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)
13494{
13495 PVMCPU pVCpu = VMMGetCpu(pVM);
13496 if (!pVCpu)
13497 return;
13498 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13499 if (!pEvtRec)
13500 return;
13501 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;
13502 pEvtRec->u.IOPortStrRead.Port = Port;
13503 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;
13504 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;
13505 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13506 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13507}
13508
13509
13510VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)
13511{
13512 PVMCPU pVCpu = VMMGetCpu(pVM);
13513 if (!pVCpu)
13514 return;
13515 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13516 if (!pEvtRec)
13517 return;
13518 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;
13519 pEvtRec->u.IOPortStrWrite.Port = Port;
13520 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;
13521 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;
13522 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;
13523 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;
13524}
13525
13526
13527/**
13528 * Fakes and records an I/O port read.
13529 *
13530 * @returns VINF_SUCCESS.
13531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13532 * @param Port The I/O port.
13533 * @param pu32Value Where to store the fake value.
13534 * @param cbValue The size of the access.
13535 */
13536IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
13537{
13538 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13539 if (pEvtRec)
13540 {
13541 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;
13542 pEvtRec->u.IOPortRead.Port = Port;
13543 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;
13544 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13545 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13546 }
13547 pVCpu->iem.s.cIOReads++;
13548 *pu32Value = 0xcccccccc;
13549 return VINF_SUCCESS;
13550}
13551
13552
13553/**
13554 * Fakes and records an I/O port write.
13555 *
13556 * @returns VINF_SUCCESS.
13557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13558 * @param Port The I/O port.
13559 * @param u32Value The value being written.
13560 * @param cbValue The size of the access.
13561 */
13562IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
13563{
13564 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);
13565 if (pEvtRec)
13566 {
13567 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;
13568 pEvtRec->u.IOPortWrite.Port = Port;
13569 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;
13570 pEvtRec->u.IOPortWrite.u32Value = u32Value;
13571 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;
13572 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;
13573 }
13574 pVCpu->iem.s.cIOWrites++;
13575 return VINF_SUCCESS;
13576}
13577
13578
13579/**
13580 * Used to add extra details about a stub case.
13581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13582 */
13583IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)
13584{
13585 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13586 PVM pVM = pVCpu->CTX_SUFF(pVM);
13588 char szRegs[4096];
13589 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
13590 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
13591 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
13592 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
13593 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
13594 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
13595 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
13596 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
13597 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
13598 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
13599 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
13600 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
13601 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
13602 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
13603 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
13604 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
13605 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
13606 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
13607 " efer=%016VR{efer}\n"
13608 " pat=%016VR{pat}\n"
13609 " sf_mask=%016VR{sf_mask}\n"
13610 "krnl_gs_base=%016VR{krnl_gs_base}\n"
13611 " lstar=%016VR{lstar}\n"
13612 " star=%016VR{star} cstar=%016VR{cstar}\n"
13613 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
13614 );
13615
13616 char szInstr1[256];
13617 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,
13618 DBGF_DISAS_FLAGS_DEFAULT_MODE,
13619 szInstr1, sizeof(szInstr1), NULL);
13620 char szInstr2[256];
13621 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
13622 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13623 szInstr2, sizeof(szInstr2), NULL);
13624
13625 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);
13626}
13627
13628
13629/**
13630 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record
13631 * dump to the assertion info.
13632 *
13633 * @param pEvtRec The record to dump.
13634 */
13635IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)
13636{
13637 switch (pEvtRec->enmEvent)
13638 {
13639 case IEMVERIFYEVENT_IOPORT_READ:
13640 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",
13641 pEvtRec->u.IOPortRead.Port,
13642 pEvtRec->u.IOPortRead.cbValue);
13643 break;
13644 case IEMVERIFYEVENT_IOPORT_WRITE:
13645 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",
13646 pEvtRec->u.IOPortWrite.Port,
13647 pEvtRec->u.IOPortWrite.cbValue,
13648 pEvtRec->u.IOPortWrite.u32Value);
13649 break;
13650 case IEMVERIFYEVENT_IOPORT_STR_READ:
13651 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",
13652 pEvtRec->u.IOPortStrRead.Port,
13653 pEvtRec->u.IOPortStrRead.cbValue,
13654 pEvtRec->u.IOPortStrRead.cTransfers);
13655 break;
13656 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
13657 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",
13658 pEvtRec->u.IOPortStrWrite.Port,
13659 pEvtRec->u.IOPortStrWrite.cbValue,
13660 pEvtRec->u.IOPortStrWrite.cTransfers);
13661 break;
13662 case IEMVERIFYEVENT_RAM_READ:
13663 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",
13664 pEvtRec->u.RamRead.GCPhys,
13665 pEvtRec->u.RamRead.cb);
13666 break;
13667 case IEMVERIFYEVENT_RAM_WRITE:
13668 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",
13669 pEvtRec->u.RamWrite.GCPhys,
13670 pEvtRec->u.RamWrite.cb,
13671 (int)pEvtRec->u.RamWrite.cb,
13672 pEvtRec->u.RamWrite.ab);
13673 break;
13674 default:
13675 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));
13676 break;
13677 }
13678}
13679
13680
13681/**
13682 * Raises an assertion on the specified records, showing the given message with
13683 * dumps of both records attached.
13684 *
13685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13686 * @param pEvtRec1 The first record.
13687 * @param pEvtRec2 The second record.
13688 * @param pszMsg The message explaining why we're asserting.
13689 */
13690IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)
13691{
13692 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13693 iemVerifyAssertAddRecordDump(pEvtRec1);
13694 iemVerifyAssertAddRecordDump(pEvtRec2);
13695 iemVerifyAssertMsg2(pVCpu);
13696 RTAssertPanic();
13697}
13698
13699
13700/**
13701 * Raises an assertion on the specified record, showing the given message with
13702 * a record dump attached.
13703 *
13704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13705 * @param pEvtRec The record to dump with the assertion.
13706 * @param pszMsg The message explaining why we're asserting.
13707 */
13708IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)
13709{
13710 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13711 iemVerifyAssertAddRecordDump(pEvtRec);
13712 iemVerifyAssertMsg2(pVCpu);
13713 RTAssertPanic();
13714}
13715
13716
13717/**
13718 * Verifies a write record.
13719 *
13720 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13721 * @param pEvtRec The write record.
13722 * @param fRem Set if REM was the other execution engine; clear
13723 * if it was HM.
13724 */
13725IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)
13726{
13727 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);
13728 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);
13729 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);
13730 if ( RT_FAILURE(rc)
13731 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )
13732 {
13733 /* fend off INS - I/O reads are faked with 0xcc filler bytes, so such writes are expected to differ */
13734 if ( !pVCpu->iem.s.cIOReads
13735 || pEvtRec->u.RamWrite.ab[0] != 0xcc
13736 || ( pEvtRec->u.RamWrite.cb != 1
13737 && pEvtRec->u.RamWrite.cb != 2
13738 && pEvtRec->u.RamWrite.cb != 4) )
13739 {
13740 /* fend off ROMs and MMIO */
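            /* (Editor's gloss, not authoritative: the ranges below roughly cover the
                legacy VGA/BIOS window at 0xa0000-0xfffff and the top 256KB just below
                4GB where the BIOS flash image is normally mapped.) */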
13741 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)
13742 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )
13743 {
13744 /* fend off fxsave */
13745 if (pEvtRec->u.RamWrite.cb != 512)
13746 {
13747 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? "vmx" : "svm";
13748 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);
13749 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);
13750 RTAssertMsg2Add("%s: %.*Rhxs\n"
13751 "iem: %.*Rhxs\n",
13752 pszWho, pEvtRec->u.RamWrite.cb, abBuf,
13753 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);
13754 iemVerifyAssertAddRecordDump(pEvtRec);
13755 iemVerifyAssertMsg2(pVCpu);
13756 RTAssertPanic();
13757 }
13758 }
13759 }
13760 }
13761
13762}
13763
13764/**
13765 * Performs the post-execution verification checks.
13766 */
13767IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)
13768{
13769 if (!IEM_VERIFICATION_ENABLED(pVCpu))
13770 return rcStrictIem;
13771
13772 /*
13773 * Switch back to the original guest context before running the other engine.
13774 */
13775 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);
13776 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);
13777 Assert(pOrgCtx != pDebugCtx);
13778 IEM_GET_CTX(pVCpu) = pOrgCtx;
13779
13780 /*
13781 * Execute the instruction in the other engine (HM when possible, otherwise REM).
13782 */
13783 bool fRem = false;
13784 PVM pVM = pVCpu->CTX_SUFF(pVM);
13786 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;
13787#ifdef IEM_VERIFICATION_MODE_FULL_HM
13788 if ( HMIsEnabled(pVM)
13789 && pVCpu->iem.s.cIOReads == 0
13790 && pVCpu->iem.s.cIOWrites == 0
13791 && !pVCpu->iem.s.fProblematicMemory)
13792 {
13793 uint64_t uStartRip = pOrgCtx->rip;
13794 unsigned iLoops = 0;
13795 do
13796 {
13797 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
13798 iLoops++;
13799 } while ( rc == VINF_SUCCESS
13800 || ( rc == VINF_EM_DBG_STEPPED
13801 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13802 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)
13803 || ( pOrgCtx->rip != pDebugCtx->rip
13804 && pVCpu->iem.s.uInjectCpl != UINT8_MAX
13805 && iLoops < 8) );
13806 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)
13807 rc = VINF_SUCCESS;
13808 }
13809#endif
13810 if ( rc == VERR_EM_CANNOT_EXEC_GUEST
13811 || rc == VINF_IOM_R3_IOPORT_READ
13812 || rc == VINF_IOM_R3_IOPORT_WRITE
13813 || rc == VINF_IOM_R3_MMIO_READ
13814 || rc == VINF_IOM_R3_MMIO_READ_WRITE
13815 || rc == VINF_IOM_R3_MMIO_WRITE
13816 || rc == VINF_CPUM_R3_MSR_READ
13817 || rc == VINF_CPUM_R3_MSR_WRITE
13818 || rc == VINF_EM_RESCHEDULE
13819 )
13820 {
13821 EMRemLock(pVM);
13822 rc = REMR3EmulateInstruction(pVM, pVCpu);
13823 AssertRC(rc);
13824 EMRemUnlock(pVM);
13825 fRem = true;
13826 }
13827
13828# if 1 /* Skip unimplemented instructions for now. */
13829 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13830 {
13831 IEM_GET_CTX(pVCpu) = pOrgCtx;
13832 if (rc == VINF_EM_DBG_STEPPED)
13833 return VINF_SUCCESS;
13834 return rc;
13835 }
13836# endif
13837
13838 /*
13839 * Compare the register states.
13840 */
13841 unsigned cDiffs = 0;
13842 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))
13843 {
13844 //Log(("REM and IEM ends up with different registers!\n"));
13845 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? "vmx" : "svm";
13846
13847# define CHECK_FIELD(a_Field) \
13848 do \
13849 { \
13850 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13851 { \
13852 switch (sizeof(pOrgCtx->a_Field)) \
13853 { \
13854 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13855 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13856 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13857 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \
13858 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13859 } \
13860 cDiffs++; \
13861 } \
13862 } while (0)
13863# define CHECK_XSTATE_FIELD(a_Field) \
13864 do \
13865 { \
13866 if (pOrgXState->a_Field != pDebugXState->a_Field) \
13867 { \
13868 switch (sizeof(pOrgXState->a_Field)) \
13869 { \
13870 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13871 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13872 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13873 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \
13874 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \
13875 } \
13876 cDiffs++; \
13877 } \
13878 } while (0)
13879
13880# define CHECK_BIT_FIELD(a_Field) \
13881 do \
13882 { \
13883 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \
13884 { \
13885 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \
13886 cDiffs++; \
13887 } \
13888 } while (0)
13889
13890# define CHECK_SEL(a_Sel) \
13891 do \
13892 { \
13893 CHECK_FIELD(a_Sel.Sel); \
13894 CHECK_FIELD(a_Sel.Attr.u); \
13895 CHECK_FIELD(a_Sel.u64Base); \
13896 CHECK_FIELD(a_Sel.u32Limit); \
13897 CHECK_FIELD(a_Sel.fFlags); \
13898 } while (0)
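
/* Added note for readers: CHECK_FIELD(rip), for example, expands to a size switch
   that prints a line along the lines of
        "     rip differs - iem=000000000040321c - vmx=0000000000403220\n"
   and bumps cDiffs when the two contexts disagree; CHECK_SEL(cs) simply applies
   CHECK_FIELD to the Sel, Attr.u, u64Base, u32Limit and fFlags members.  (The
   values shown are hypothetical.) */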
13899
13900 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);
13901 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);
13902
13903#if 1 /* The recompiler doesn't update these the intel way. */
13904 if (fRem)
13905 {
13906 pOrgXState->x87.FOP = pDebugXState->x87.FOP;
13907 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;
13908 pOrgXState->x87.CS = pDebugXState->x87.CS;
13909 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;
13910 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;
13911 pOrgXState->x87.DS = pDebugXState->x87.DS;
13912 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;
13913 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;
13914 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))
13915 pOrgXState->x87.FSW = pDebugXState->x87.FSW;
13916 }
13917#endif
13918 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))
13919 {
13920 RTAssertMsg2Weak(" the FPU state differs\n");
13921 cDiffs++;
13922 CHECK_XSTATE_FIELD(x87.FCW);
13923 CHECK_XSTATE_FIELD(x87.FSW);
13924 CHECK_XSTATE_FIELD(x87.FTW);
13925 CHECK_XSTATE_FIELD(x87.FOP);
13926 CHECK_XSTATE_FIELD(x87.FPUIP);
13927 CHECK_XSTATE_FIELD(x87.CS);
13928 CHECK_XSTATE_FIELD(x87.Rsrvd1);
13929 CHECK_XSTATE_FIELD(x87.FPUDP);
13930 CHECK_XSTATE_FIELD(x87.DS);
13931 CHECK_XSTATE_FIELD(x87.Rsrvd2);
13932 CHECK_XSTATE_FIELD(x87.MXCSR);
13933 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);
13934 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);
13935 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);
13936 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);
13937 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);
13938 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);
13939 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);
13940 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);
13941 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);
13942 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);
13943 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);
13944 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);
13945 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);
13946 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);
13947 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);
13948 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);
13949 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);
13950 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);
13951 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);
13952 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);
13953 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);
13954 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);
13955 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);
13956 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);
13957 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);
13958 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)
13959 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);
13960 }
13961 CHECK_FIELD(rip);
13962 uint32_t fFlagsMask = UINT32_MAX & ~pVCpu->iem.s.fUndefinedEFlags;
13963 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))
13964 {
13965 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);
13966 CHECK_BIT_FIELD(rflags.Bits.u1CF);
13967 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);
13968 CHECK_BIT_FIELD(rflags.Bits.u1PF);
13969 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);
13970 CHECK_BIT_FIELD(rflags.Bits.u1AF);
13971 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);
13972 CHECK_BIT_FIELD(rflags.Bits.u1ZF);
13973 CHECK_BIT_FIELD(rflags.Bits.u1SF);
13974 CHECK_BIT_FIELD(rflags.Bits.u1TF);
13975 CHECK_BIT_FIELD(rflags.Bits.u1IF);
13976 CHECK_BIT_FIELD(rflags.Bits.u1DF);
13977 CHECK_BIT_FIELD(rflags.Bits.u1OF);
13978 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);
13979 CHECK_BIT_FIELD(rflags.Bits.u1NT);
13980 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);
13981 if (0 && !fRem) /** @todo debug the occasional clear RF flags when running against VT-x. */
13982 CHECK_BIT_FIELD(rflags.Bits.u1RF);
13983 CHECK_BIT_FIELD(rflags.Bits.u1VM);
13984 CHECK_BIT_FIELD(rflags.Bits.u1AC);
13985 CHECK_BIT_FIELD(rflags.Bits.u1VIF);
13986 CHECK_BIT_FIELD(rflags.Bits.u1VIP);
13987 CHECK_BIT_FIELD(rflags.Bits.u1ID);
13988 }
13989
13990 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)
13991 CHECK_FIELD(rax);
13992 CHECK_FIELD(rcx);
13993 if (!pVCpu->iem.s.fIgnoreRaxRdx)
13994 CHECK_FIELD(rdx);
13995 CHECK_FIELD(rbx);
13996 CHECK_FIELD(rsp);
13997 CHECK_FIELD(rbp);
13998 CHECK_FIELD(rsi);
13999 CHECK_FIELD(rdi);
14000 CHECK_FIELD(r8);
14001 CHECK_FIELD(r9);
14002 CHECK_FIELD(r10);
14003 CHECK_FIELD(r11);
14004 CHECK_FIELD(r12);
14005 CHECK_FIELD(r13);
      CHECK_FIELD(r14);
      CHECK_FIELD(r15);
14006 CHECK_SEL(cs);
14007 CHECK_SEL(ss);
14008 CHECK_SEL(ds);
14009 CHECK_SEL(es);
14010 CHECK_SEL(fs);
14011 CHECK_SEL(gs);
14012 CHECK_FIELD(cr0);
14013
14014 /* Kludge #1: REM fetches code across the page boundary and faults on the next page, while we execute
14015 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */
14016 /* Kludge #2: CR2 differs slightly on cross-page-boundary faults; we report the last address of the access
14017 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. */
14018 if (pOrgCtx->cr2 != pDebugCtx->cr2)
14019 {
14020 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)
14021 { /* ignore */ }
14022 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)
14023 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 0
14024 && fRem)
14025 { /* ignore */ }
14026 else
14027 CHECK_FIELD(cr2);
14028 }
14029 CHECK_FIELD(cr3);
14030 CHECK_FIELD(cr4);
14031 CHECK_FIELD(dr[0]);
14032 CHECK_FIELD(dr[1]);
14033 CHECK_FIELD(dr[2]);
14034 CHECK_FIELD(dr[3]);
14035 CHECK_FIELD(dr[6]);
14036 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/
14037 CHECK_FIELD(dr[7]);
14038 CHECK_FIELD(gdtr.cbGdt);
14039 CHECK_FIELD(gdtr.pGdt);
14040 CHECK_FIELD(idtr.cbIdt);
14041 CHECK_FIELD(idtr.pIdt);
14042 CHECK_SEL(ldtr);
14043 CHECK_SEL(tr);
14044 CHECK_FIELD(SysEnter.cs);
14045 CHECK_FIELD(SysEnter.eip);
14046 CHECK_FIELD(SysEnter.esp);
14047 CHECK_FIELD(msrEFER);
14048 CHECK_FIELD(msrSTAR);
14049 CHECK_FIELD(msrPAT);
14050 CHECK_FIELD(msrLSTAR);
14051 CHECK_FIELD(msrCSTAR);
14052 CHECK_FIELD(msrSFMASK);
14053 CHECK_FIELD(msrKERNELGSBASE);
14054
14055 if (cDiffs != 0)
14056 {
14057 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14058 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
14059 RTAssertPanic();
14060 static bool volatile s_fEnterDebugger = true;
14061 if (s_fEnterDebugger)
14062 DBGFSTOP(pVM);
14063
14064# if 1 /* Ignore unimplemented instructions for now. */
14065 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14066 rcStrictIem = VINF_SUCCESS;
14067# endif
14068 }
14069# undef CHECK_FIELD
14070# undef CHECK_BIT_FIELD
14071 }
14072
14073 /*
14074 * If the register state compared fine, check the verification event
14075 * records.
14076 */
14077 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)
14078 {
14079 /*
14080 * Compare verification event records.
14081 * - I/O port accesses should be a 1:1 match.
14082 */
14083 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;
14084 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;
14085 while (pIemRec && pOtherRec)
14086 {
14087 /* Since the other engine may miss RAM writes and reads, skip extra
14088 IEM-only RAM records here, but verify that the writes hit memory. */
14089 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)
14090 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)
14091 && pIemRec->pNext)
14092 {
14093 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14094 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14095 pIemRec = pIemRec->pNext;
14096 }
14097
14098 /* Do the compare. */
14099 if (pIemRec->enmEvent != pOtherRec->enmEvent)
14100 {
14101 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");
14102 break;
14103 }
14104 bool fEquals;
14105 switch (pIemRec->enmEvent)
14106 {
14107 case IEMVERIFYEVENT_IOPORT_READ:
14108 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port
14109 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;
14110 break;
14111 case IEMVERIFYEVENT_IOPORT_WRITE:
14112 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port
14113 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue
14114 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;
14115 break;
14116 case IEMVERIFYEVENT_IOPORT_STR_READ:
14117 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port
14118 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue
14119 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;
14120 break;
14121 case IEMVERIFYEVENT_IOPORT_STR_WRITE:
14122 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port
14123 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue
14124 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;
14125 break;
14126 case IEMVERIFYEVENT_RAM_READ:
14127 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys
14128 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;
14129 break;
14130 case IEMVERIFYEVENT_RAM_WRITE:
14131 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys
14132 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb
14133 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);
14134 break;
14135 default:
14136 fEquals = false;
14137 break;
14138 }
14139 if (!fEquals)
14140 {
14141 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");
14142 break;
14143 }
14144
14145 /* advance */
14146 pIemRec = pIemRec->pNext;
14147 pOtherRec = pOtherRec->pNext;
14148 }
14149
14150 /* Verify any trailing IEM RAM write records; extra reads are simply skipped. */
14151 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))
14152 {
14153 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)
14154 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);
14155 pIemRec = pIemRec->pNext;
14156 }
14157 if (pIemRec != NULL)
14158 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");
14159 else if (pOtherRec != NULL)
14160 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");
14161 }
14162 IEM_GET_CTX(pVCpu) = pOrgCtx;
14163
14164 return rcStrictIem;
14165}
14166
14167#else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14168
14169/* stubs */
14170IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)
14171{
14172 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);
14173 return VERR_INTERNAL_ERROR;
14174}
14175
14176IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)
14177{
14178 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);
14179 return VERR_INTERNAL_ERROR;
14180}
14181
14182#endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */
14183
14184
14185#ifdef LOG_ENABLED
14186/**
14187 * Logs the current instruction.
14188 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14189 * @param pCtx The current CPU context.
14190 * @param fSameCtx Set if we have the same context information as the VMM,
14191 * clear if we may have already executed an instruction in
14192 * our debug context. When clear, we assume IEMCPU holds
14193 * valid CPU mode info.
14194 */
14195IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
14196{
14197# ifdef IN_RING3
14198 if (LogIs2Enabled())
14199 {
14200 char szInstr[256];
14201 uint32_t cbInstr = 0;
14202 if (fSameCtx)
14203 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
14204 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
14205 szInstr, sizeof(szInstr), &cbInstr);
14206 else
14207 {
14208 uint32_t fFlags = 0;
14209 switch (pVCpu->iem.s.enmCpuMode)
14210 {
14211 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
14212 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
14213 case IEMMODE_16BIT:
14214 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
14215 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
14216 else
14217 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
14218 break;
14219 }
14220 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
14221 szInstr, sizeof(szInstr), &cbInstr);
14222 }
14223
14224 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
14225 Log2(("****\n"
14226 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
14227 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
14228 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
14229 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
14230 " %s\n"
14231 ,
14232 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
14233 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
14234 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
14235 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
14236 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
14237 szInstr));
14238
14239 if (LogIs3Enabled())
14240 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
14241 }
14242 else
14243# endif
14244 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
14245 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
14246 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
14247}
14248#endif
14249
14250
14251/**
14252 * Makes status code adjustments (pass up from I/O and access handlers)
14253 * as well as maintaining statistics.
14254 *
14255 * @returns Strict VBox status code to pass up.
14256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14257 * @param rcStrict The status from executing an instruction.
14258 */
14259DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14260{
14261 if (rcStrict != VINF_SUCCESS)
14262 {
14263 if (RT_SUCCESS(rcStrict))
14264 {
14265 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
14266 || rcStrict == VINF_IOM_R3_IOPORT_READ
14267 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
14268 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
14269 || rcStrict == VINF_IOM_R3_MMIO_READ
14270 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
14271 || rcStrict == VINF_IOM_R3_MMIO_WRITE
14272 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
14273 || rcStrict == VINF_CPUM_R3_MSR_READ
14274 || rcStrict == VINF_CPUM_R3_MSR_WRITE
14275 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
14276 || rcStrict == VINF_EM_RAW_TO_R3
14277 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
14278 /* raw-mode / virt handlers only: */
14279 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
14280 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
14281 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
14282 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
14283 || rcStrict == VINF_SELM_SYNC_GDT
14284 || rcStrict == VINF_CSAM_PENDING_ACTION
14285 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
14286 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
14287/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
14288 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
14289 if (rcPassUp == VINF_SUCCESS)
14290 pVCpu->iem.s.cRetInfStatuses++;
14291 else if ( rcPassUp < VINF_EM_FIRST
14292 || rcPassUp > VINF_EM_LAST
14293 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
14294 {
14295 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14296 pVCpu->iem.s.cRetPassUpStatus++;
14297 rcStrict = rcPassUp;
14298 }
14299 else
14300 {
14301 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
14302 pVCpu->iem.s.cRetInfStatuses++;
14303 }
14304 }
14305 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
14306 pVCpu->iem.s.cRetAspectNotImplemented++;
14307 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
14308 pVCpu->iem.s.cRetInstrNotImplemented++;
14309#ifdef IEM_VERIFICATION_MODE_FULL
14310 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)
14311 rcStrict = VINF_SUCCESS;
14312#endif
14313 else
14314 pVCpu->iem.s.cRetErrStatuses++;
14315 }
14316 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
14317 {
14318 pVCpu->iem.s.cRetPassUpStatus++;
14319 rcStrict = pVCpu->iem.s.rcPassUp;
14320 }
14321
14322 return rcStrict;
14323}
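
/*
 * Added note (illustrative, not authoritative): the pass-up rule above lets a
 * status recorded via rcPassUp replace an informational rcStrict whenever the
 * pass-up code lies outside the VINF_EM_FIRST..VINF_EM_LAST window or is
 * numerically lower (i.e. more urgent by VBox convention) than rcStrict.  For
 * instance, with rcStrict = VINF_IOM_R3_IOPORT_READ and a previously recorded
 * rcPassUp = VINF_EM_RAW_TO_R3, the ring-3 request would be expected to win and
 * be counted in cRetPassUpStatus rather than cRetInfStatuses.
 */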
14324
14325
14326/**
14327 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
14328 * IEMExecOneWithPrefetchedByPC.
14329 *
14330 * Similar code is found in IEMExecLots.
14331 *
14332 * @return Strict VBox status code.
14333 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14335 * @param fExecuteInhibit If set, execute the instruction following CLI,
14336 * POP SS and MOV SS,GR.
14337 */
14338DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
14339{
14340#ifdef IEM_WITH_SETJMP
14341 VBOXSTRICTRC rcStrict;
14342 jmp_buf JmpBuf;
14343 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14344 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14345 if ((rcStrict = setjmp(JmpBuf)) == 0)
14346 {
14347 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14348 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14349 }
14350 else
14351 pVCpu->iem.s.cLongJumps++;
14352 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14353#else
14354 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14355 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14356#endif
14357 if (rcStrict == VINF_SUCCESS)
14358 pVCpu->iem.s.cInstructions++;
14359 if (pVCpu->iem.s.cActiveMappings > 0)
14360 {
14361 Assert(rcStrict != VINF_SUCCESS);
14362 iemMemRollback(pVCpu);
14363 }
14364//#ifdef DEBUG
14365// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14366//#endif
14367
14368 /* Execute the next instruction as well if a cli, pop ss or
14369 mov ss, Gr has just completed successfully. */
14370 if ( fExecuteInhibit
14371 && rcStrict == VINF_SUCCESS
14372 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14373 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
14374 {
14375 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14376 if (rcStrict == VINF_SUCCESS)
14377 {
14378#ifdef LOG_ENABLED
14379 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
14380#endif
14381#ifdef IEM_WITH_SETJMP
14382 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14383 if ((rcStrict = setjmp(JmpBuf)) == 0)
14384 {
14385 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14386 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14387 }
14388 else
14389 pVCpu->iem.s.cLongJumps++;
14390 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14391#else
14392 IEM_OPCODE_GET_NEXT_U8(&b);
14393 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14394#endif
14395 if (rcStrict == VINF_SUCCESS)
14396 pVCpu->iem.s.cInstructions++;
14397 if (pVCpu->iem.s.cActiveMappings > 0)
14398 {
14399 Assert(rcStrict != VINF_SUCCESS);
14400 iemMemRollback(pVCpu);
14401 }
14402 }
14403 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14404 }
14405
14406 /*
14407 * Return value fiddling, statistics and sanity assertions.
14408 */
14409 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14410
14411 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14412 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14413#if defined(IEM_VERIFICATION_MODE_FULL)
14414 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14415 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14416 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14417 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14418#endif
14419 return rcStrict;
14420}
14421
14422
14423#ifdef IN_RC
14424/**
14425 * Re-enters raw-mode or ensure we return to ring-3.
14426 *
14427 * @returns rcStrict, maybe modified.
14428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14429 * @param pCtx The current CPU context.
14430 * @param rcStrict The status code returned by the interpreter.
14431 */
14432DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
14433{
14434 if ( !pVCpu->iem.s.fInPatchCode
14435 && ( rcStrict == VINF_SUCCESS
14436 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14437 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14438 {
14439 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14440 CPUMRawEnter(pVCpu);
14441 else
14442 {
14443 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14444 rcStrict = VINF_EM_RESCHEDULE;
14445 }
14446 }
14447 return rcStrict;
14448}
14449#endif
14450
14451
14452/**
14453 * Execute one instruction.
14454 *
14455 * @return Strict VBox status code.
14456 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14457 */
14458VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14459{
14460#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14461 if (++pVCpu->iem.s.cVerifyDepth == 1)
14462 iemExecVerificationModeSetup(pVCpu);
14463#endif
14464#ifdef LOG_ENABLED
14465 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14466 iemLogCurInstr(pVCpu, pCtx, true);
14467#endif
14468
14469 /*
14470 * Do the decoding and emulation.
14471 */
14472 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14473 if (rcStrict == VINF_SUCCESS)
14474 rcStrict = iemExecOneInner(pVCpu, true);
14475
14476#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14477 /*
14478 * Assert some sanity.
14479 */
14480 if (pVCpu->iem.s.cVerifyDepth == 1)
14481 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14482 pVCpu->iem.s.cVerifyDepth--;
14483#endif
14484#ifdef IN_RC
14485 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14486#endif
14487 if (rcStrict != VINF_SUCCESS)
14488 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14489 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14490 return rcStrict;
14491}
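
/*
 * Usage sketch (added for illustration; a hypothetical caller, not the actual
 * EM code):
 *
 *      VBOXSTRICTRC rcStrict;
 *      do
 *          rcStrict = IEMExecOne(pVCpu);        // interpret one instruction
 *      while (rcStrict == VINF_SUCCESS);
 *      // rcStrict now holds an informational request such as
 *      // VINF_IOM_R3_IOPORT_READ, or an error like VERR_IEM_INSTR_NOT_IMPLEMENTED,
 *      // which the caller is expected to handle.
 */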
14492
14493
14494VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14495{
14496 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14497 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14498
14499 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14500 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14501 if (rcStrict == VINF_SUCCESS)
14502 {
14503 rcStrict = iemExecOneInner(pVCpu, true);
14504 if (pcbWritten)
14505 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14506 }
14507
14508#ifdef IN_RC
14509 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14510#endif
14511 return rcStrict;
14512}
14513
14514
14515VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14516 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14517{
14518 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14519 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14520
14521 VBOXSTRICTRC rcStrict;
14522 if ( cbOpcodeBytes
14523 && pCtx->rip == OpcodeBytesPC)
14524 {
14525 iemInitDecoder(pVCpu, false);
14526#ifdef IEM_WITH_CODE_TLB
14527 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14528 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14529 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14530 pVCpu->iem.s.offCurInstrStart = 0;
14531 pVCpu->iem.s.offInstrNextByte = 0;
14532#else
14533 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14534 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14535#endif
14536 rcStrict = VINF_SUCCESS;
14537 }
14538 else
14539 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14540 if (rcStrict == VINF_SUCCESS)
14541 {
14542 rcStrict = iemExecOneInner(pVCpu, true);
14543 }
14544
14545#ifdef IN_RC
14546 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14547#endif
14548 return rcStrict;
14549}
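
/*
 * Usage sketch (added for illustration; hypothetical values): a caller that has
 * already fetched the opcode bytes, e.g. from an exit info buffer, can skip the
 * prefetch:
 *
 *      uint8_t const abOpcode[] = { 0x0f, 0xa2 };   // cpuid
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx),
 *                                                           pCtx->rip, abOpcode,
 *                                                           sizeof(abOpcode));
 *
 * The bytes are only used when pCtx->rip equals OpcodeBytesPC; otherwise the
 * function falls back to the normal opcode prefetch path.
 */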
14550
14551
14552VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14553{
14554 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14555 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14556
14557 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14558 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14559 if (rcStrict == VINF_SUCCESS)
14560 {
14561 rcStrict = iemExecOneInner(pVCpu, false);
14562 if (pcbWritten)
14563 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14564 }
14565
14566#ifdef IN_RC
14567 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14568#endif
14569 return rcStrict;
14570}
14571
14572
14573VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14574 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14575{
14576 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14577 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14578
14579 VBOXSTRICTRC rcStrict;
14580 if ( cbOpcodeBytes
14581 && pCtx->rip == OpcodeBytesPC)
14582 {
14583 iemInitDecoder(pVCpu, true);
14584#ifdef IEM_WITH_CODE_TLB
14585 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14586 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14587 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14588 pVCpu->iem.s.offCurInstrStart = 0;
14589 pVCpu->iem.s.offInstrNextByte = 0;
14590#else
14591 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14592 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14593#endif
14594 rcStrict = VINF_SUCCESS;
14595 }
14596 else
14597 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14598 if (rcStrict == VINF_SUCCESS)
14599 rcStrict = iemExecOneInner(pVCpu, false);
14600
14601#ifdef IN_RC
14602 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14603#endif
14604 return rcStrict;
14605}
14606
14607
14608/**
14609 * For debugging DISGetParamSize; may come in handy.
14610 *
14611 * @returns Strict VBox status code.
14612 * @param pVCpu The cross context virtual CPU structure of the
14613 * calling EMT.
14614 * @param pCtxCore The context core structure.
14615 * @param OpcodeBytesPC The PC of the opcode bytes.
14616 * @param pvOpcodeBytes Prefetched opcode bytes.
14617 * @param cbOpcodeBytes Number of prefetched bytes.
14618 * @param pcbWritten Where to return the number of bytes written.
14619 * Optional.
14620 */
14621VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14622 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14623 uint32_t *pcbWritten)
14624{
14625 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14626 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14627
14628 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14629 VBOXSTRICTRC rcStrict;
14630 if ( cbOpcodeBytes
14631 && pCtx->rip == OpcodeBytesPC)
14632 {
14633 iemInitDecoder(pVCpu, true);
14634#ifdef IEM_WITH_CODE_TLB
14635 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14636 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14637 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14638 pVCpu->iem.s.offCurInstrStart = 0;
14639 pVCpu->iem.s.offInstrNextByte = 0;
14640#else
14641 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14642 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14643#endif
14644 rcStrict = VINF_SUCCESS;
14645 }
14646 else
14647 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14648 if (rcStrict == VINF_SUCCESS)
14649 {
14650 rcStrict = iemExecOneInner(pVCpu, false);
14651 if (pcbWritten)
14652 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14653 }
14654
14655#ifdef IN_RC
14656 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14657#endif
14658 return rcStrict;
14659}
14660
14661
14662VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14663{
14664 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14665
14666#if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)
14667 /*
14668 * See if there is an interrupt pending in TRPM, inject it if we can.
14669 */
14670 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14671# ifdef IEM_VERIFICATION_MODE_FULL
14672 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14673# endif
14674 if ( pCtx->eflags.Bits.u1IF
14675 && TRPMHasTrap(pVCpu)
14676 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14677 {
14678 uint8_t u8TrapNo;
14679 TRPMEVENT enmType;
14680 RTGCUINT uErrCode;
14681 RTGCPTR uCr2;
14682 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14683 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14684 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14685 TRPMResetTrap(pVCpu);
14686 }
14687
14688 /*
14689 * Log the state.
14690 */
14691# ifdef LOG_ENABLED
14692 iemLogCurInstr(pVCpu, pCtx, true);
14693# endif
14694
14695 /*
14696 * Do the decoding and emulation.
14697 */
14698 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14699 if (rcStrict == VINF_SUCCESS)
14700 rcStrict = iemExecOneInner(pVCpu, true);
14701
14702 /*
14703 * Assert some sanity.
14704 */
14705 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);
14706
14707 /*
14708 * Log and return.
14709 */
14710 if (rcStrict != VINF_SUCCESS)
14711 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14712 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14713 if (pcInstructions)
14714 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14715 return rcStrict;
14716
14717#else /* Not verification mode */
14718
14719 /*
14720 * See if there is an interrupt pending in TRPM, inject it if we can.
14721 */
14722 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14723# ifdef IEM_VERIFICATION_MODE_FULL
14724 pVCpu->iem.s.uInjectCpl = UINT8_MAX;
14725# endif
14726 if ( pCtx->eflags.Bits.u1IF
14727 && TRPMHasTrap(pVCpu)
14728 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14729 {
14730 uint8_t u8TrapNo;
14731 TRPMEVENT enmType;
14732 RTGCUINT uErrCode;
14733 RTGCPTR uCr2;
14734 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14735 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14736 if (!IEM_VERIFICATION_ENABLED(pVCpu))
14737 TRPMResetTrap(pVCpu);
14738 }
14739
14740 /*
14741 * Initial decoder init w/ prefetch, then setup setjmp.
14742 */
14743 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14744 if (rcStrict == VINF_SUCCESS)
14745 {
14746# ifdef IEM_WITH_SETJMP
14747 jmp_buf JmpBuf;
14748 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14749 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14750 pVCpu->iem.s.cActiveMappings = 0;
14751 if ((rcStrict = setjmp(JmpBuf)) == 0)
14752# endif
14753 {
14754 /*
14755 * The run loop. We limit ourselves to 4096 instructions right now.
14756 */
14757 PVM pVM = pVCpu->CTX_SUFF(pVM);
14758 uint32_t cInstr = 4096;
14759 for (;;)
14760 {
14761 /*
14762 * Log the state.
14763 */
14764# ifdef LOG_ENABLED
14765 iemLogCurInstr(pVCpu, pCtx, true);
14766# endif
14767
14768 /*
14769 * Do the decoding and emulation.
14770 */
14771 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14772 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14773 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14774 {
14775 Assert(pVCpu->iem.s.cActiveMappings == 0);
14776 pVCpu->iem.s.cInstructions++;
14777 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14778 {
14779 uint32_t fCpu = pVCpu->fLocalForcedActions
14780 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14781 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14782 | VMCPU_FF_TLB_FLUSH
14783# ifdef VBOX_WITH_RAW_MODE
14784 | VMCPU_FF_TRPM_SYNC_IDT
14785 | VMCPU_FF_SELM_SYNC_TSS
14786 | VMCPU_FF_SELM_SYNC_GDT
14787 | VMCPU_FF_SELM_SYNC_LDT
14788# endif
14789 | VMCPU_FF_INHIBIT_INTERRUPTS
14790 | VMCPU_FF_BLOCK_NMIS
14791 | VMCPU_FF_UNHALT ));
14792
14793 if (RT_LIKELY( ( !fCpu
14794 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14795 && !pCtx->rflags.Bits.u1IF) )
14796 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14797 {
14798 if (cInstr-- > 0)
14799 {
14800 Assert(pVCpu->iem.s.cActiveMappings == 0);
14801 iemReInitDecoder(pVCpu);
14802 continue;
14803 }
14804 }
14805 }
14806 Assert(pVCpu->iem.s.cActiveMappings == 0);
14807 }
14808 else if (pVCpu->iem.s.cActiveMappings > 0)
14809 iemMemRollback(pVCpu);
14810 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14811 break;
14812 }
14813 }
14814# ifdef IEM_WITH_SETJMP
14815 else
14816 {
14817 if (pVCpu->iem.s.cActiveMappings > 0)
14818 iemMemRollback(pVCpu);
14819 pVCpu->iem.s.cLongJumps++;
14820 }
14821 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14822# endif
14823
14824 /*
14825 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14826 */
14827 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14828 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14829# if defined(IEM_VERIFICATION_MODE_FULL)
14830 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));
14831 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));
14832 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));
14833 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));
14834# endif
14835 }
14836
14837 /*
14838 * Maybe re-enter raw-mode and log.
14839 */
14840# ifdef IN_RC
14841 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14842# endif
14843 if (rcStrict != VINF_SUCCESS)
14844 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14845 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14846 if (pcInstructions)
14847 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14848 return rcStrict;
14849#endif /* Not verification mode */
14850}
14851
14852
14853
14854/**
14855 * Injects a trap, fault, abort, software interrupt or external interrupt.
14856 *
14857 * The parameter list matches TRPMQueryTrapAll pretty closely.
14858 *
14859 * @returns Strict VBox status code.
14860 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14861 * @param u8TrapNo The trap number.
14862 * @param enmType What type is it (trap/fault/abort), software
14863 * interrupt or hardware interrupt.
14864 * @param uErrCode The error code if applicable.
14865 * @param uCr2 The CR2 value if applicable.
14866 * @param cbInstr The instruction length (only relevant for
14867 * software interrupts).
14868 */
14869VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14870 uint8_t cbInstr)
14871{
14872 iemInitDecoder(pVCpu, false);
14873#ifdef DBGFTRACE_ENABLED
14874 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14875 u8TrapNo, enmType, uErrCode, uCr2);
14876#endif
14877
14878 uint32_t fFlags;
14879 switch (enmType)
14880 {
14881 case TRPM_HARDWARE_INT:
14882 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14883 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14884 uErrCode = uCr2 = 0;
14885 break;
14886
14887 case TRPM_SOFTWARE_INT:
14888 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14889 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14890 uErrCode = uCr2 = 0;
14891 break;
14892
14893 case TRPM_TRAP:
14894 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14895 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14896 if (u8TrapNo == X86_XCPT_PF)
14897 fFlags |= IEM_XCPT_FLAGS_CR2;
14898 switch (u8TrapNo)
14899 {
14900 case X86_XCPT_DF:
14901 case X86_XCPT_TS:
14902 case X86_XCPT_NP:
14903 case X86_XCPT_SS:
14904 case X86_XCPT_PF:
14905 case X86_XCPT_AC:
14906 fFlags |= IEM_XCPT_FLAGS_ERR;
14907 break;
14908
14909 case X86_XCPT_NMI:
14910 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14911 break;
14912 }
14913 break;
14914
14915 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14916 }
14917
14918 return iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14919}
14920
14921
14922/**
14923 * Injects the active TRPM event.
14924 *
14925 * @returns Strict VBox status code.
14926 * @param pVCpu The cross context virtual CPU structure.
14927 */
14928VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14929{
14930#ifndef IEM_IMPLEMENTS_TASKSWITCH
14931 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14932#else
14933 uint8_t u8TrapNo;
14934 TRPMEVENT enmType;
14935 RTGCUINT uErrCode;
14936 RTGCUINTPTR uCr2;
14937 uint8_t cbInstr;
14938 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14939 if (RT_FAILURE(rc))
14940 return rc;
14941
14942 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14943
14944 /** @todo Are there any other codes that imply the event was successfully
14945 * delivered to the guest? See @bugref{6607}. */
14946 if ( rcStrict == VINF_SUCCESS
14947 || rcStrict == VINF_IEM_RAISED_XCPT)
14948 {
14949 TRPMResetTrap(pVCpu);
14950 }
14951 return rcStrict;
14952#endif
14953}
14954
14955
14956VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14957{
14958 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14959 return VERR_NOT_IMPLEMENTED;
14960}
14961
14962
14963VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14964{
14965 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14966 return VERR_NOT_IMPLEMENTED;
14967}
14968
14969
14970#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14971/**
14972 * Executes an IRET instruction with the default operand size.
14973 *
14974 * This is for PATM.
14975 *
14976 * @returns VBox status code.
14977 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14978 * @param pCtxCore The register frame.
14979 */
14980VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14981{
14982 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14983
14984 iemCtxCoreToCtx(pCtx, pCtxCore);
14985 iemInitDecoder(pVCpu);
14986 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14987 if (rcStrict == VINF_SUCCESS)
14988 iemCtxToCtxCore(pCtxCore, pCtx);
14989 else
14990 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14991 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14992 return rcStrict;
14993}
14994#endif
14995
14996
14997/**
14998 * Macro used by the IEMExec* method to check the given instruction length.
14999 *
15000 * Will return on failure!
15001 *
15002 * @param a_cbInstr The given instruction length.
15003 * @param a_cbMin The minimum length.
15004 */
15005#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
15006 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
15007 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
15008
15009
15010/**
15011 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
15012 *
15013 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
15014 *
15015 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
15016 * @param pVCpu The cross context virtual CPU structure of the calling thread.
15017 * @param rcStrict The status code to fiddle.
15018 */
15019DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15020{
15021 iemUninitExec(pVCpu);
15022#ifdef IN_RC
15023 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
15024 iemExecStatusCodeFiddling(pVCpu, rcStrict));
15025#else
15026 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15027#endif
15028}
15029
15030
15031/**
15032 * Interface for HM and EM for executing string I/O OUT (write) instructions.
15033 *
15034 * This API ASSUMES that the caller has already verified that the guest code is
15035 * allowed to access the I/O port. (The I/O port is in the DX register in the
15036 * guest state.)
15037 *
15038 * @returns Strict VBox status code.
15039 * @param pVCpu The cross context virtual CPU structure.
15040 * @param cbValue The size of the I/O port access (1, 2, or 4).
15041 * @param enmAddrMode The addressing mode.
15042 * @param fRepPrefix Indicates whether a repeat prefix is used
15043 * (doesn't matter which for this instruction).
15044 * @param cbInstr The instruction length in bytes.
15045 * @param iEffSeg The effective segment register (index).
15046 * @param fIoChecked Whether the access to the I/O port has been
15047 * checked or not. It's typically checked in the
15048 * HM scenario.
15049 */
15050VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15051 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
15052{
15053 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
15054 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15055
15056 /*
15057 * State init.
15058 */
15059 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15060
15061 /*
15062 * Switch orgy for getting to the right handler.
15063 */
15064 VBOXSTRICTRC rcStrict;
15065 if (fRepPrefix)
15066 {
15067 switch (enmAddrMode)
15068 {
15069 case IEMMODE_16BIT:
15070 switch (cbValue)
15071 {
15072 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15073 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15074 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15075 default:
15076 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15077 }
15078 break;
15079
15080 case IEMMODE_32BIT:
15081 switch (cbValue)
15082 {
15083 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15084 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15085 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15086 default:
15087 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15088 }
15089 break;
15090
15091 case IEMMODE_64BIT:
15092 switch (cbValue)
15093 {
15094 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15095 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15096 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15097 default:
15098 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15099 }
15100 break;
15101
15102 default:
15103 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15104 }
15105 }
15106 else
15107 {
15108 switch (enmAddrMode)
15109 {
15110 case IEMMODE_16BIT:
15111 switch (cbValue)
15112 {
15113 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15114 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15115 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15116 default:
15117 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15118 }
15119 break;
15120
15121 case IEMMODE_32BIT:
15122 switch (cbValue)
15123 {
15124 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15125 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15126 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15127 default:
15128 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15129 }
15130 break;
15131
15132 case IEMMODE_64BIT:
15133 switch (cbValue)
15134 {
15135 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15136 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15137 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
15138 default:
15139 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15140 }
15141 break;
15142
15143 default:
15144 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15145 }
15146 }
15147
15148 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15149}
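
/*
 * Illustrative caller sketch (not part of the build): how a hardware-assisted
 * execution (HM) exit handler might reach IEMExecStringIoWrite for a REP OUTSB
 * exit.  The helper name and the literal values are hypothetical; only the IEM
 * API and its parameter meanings come from the function above.
 *
 *     static VBOXSTRICTRC hmExitRepOutsbExample(PVMCPU pVCpu)
 *     {
 *         uint8_t const cbValue     = 1;              // OUTSB writes one byte at a time.
 *         IEMMODE const enmAddrMode = IEMMODE_32BIT;  // Guest was using 32-bit addressing.
 *         bool const    fRepPrefix  = true;           // REP prefix present.
 *         uint8_t const cbInstr     = 2;              // f3 6e = REP OUTSB.
 *         uint8_t const iEffSeg     = X86_SREG_DS;    // No segment override on the source.
 *         bool const    fIoChecked  = true;           // HM already consulted the I/O bitmaps.
 *         return IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRepPrefix,
 *                                     cbInstr, iEffSeg, fIoChecked);
 *     }
 */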
15150
15151
15152/**
15153 * Interface for HM and EM for executing string I/O IN (read) instructions.
15154 *
15155 * This API ASSUMES that the caller has already verified that the guest code is
15156 * allowed to access the I/O port. (The I/O port is in the DX register in the
15157 * guest state.)
15158 *
15159 * @returns Strict VBox status code.
15160 * @param pVCpu The cross context virtual CPU structure.
15161 * @param cbValue The size of the I/O port access (1, 2, or 4).
15162 * @param enmAddrMode The addressing mode.
15163 * @param fRepPrefix Indicates whether a repeat prefix is used
15164 * (doesn't matter which for this instruction).
15165 * @param cbInstr The instruction length in bytes.
15166 * @param fIoChecked Whether the access to the I/O port has been
15167 * checked or not. It's typically checked in the
15168 * HM scenario.
15169 */
15170VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
15171 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
15172{
15173 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15174
15175 /*
15176 * State init.
15177 */
15178 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15179
15180 /*
15181 * Switch orgy for getting to the right handler.
15182 */
15183 VBOXSTRICTRC rcStrict;
15184 if (fRepPrefix)
15185 {
15186 switch (enmAddrMode)
15187 {
15188 case IEMMODE_16BIT:
15189 switch (cbValue)
15190 {
15191 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15192 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15193 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15194 default:
15195 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15196 }
15197 break;
15198
15199 case IEMMODE_32BIT:
15200 switch (cbValue)
15201 {
15202 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15203 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15204 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15205 default:
15206 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15207 }
15208 break;
15209
15210 case IEMMODE_64BIT:
15211 switch (cbValue)
15212 {
15213 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15214 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15215 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15216 default:
15217 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15218 }
15219 break;
15220
15221 default:
15222 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15223 }
15224 }
15225 else
15226 {
15227 switch (enmAddrMode)
15228 {
15229 case IEMMODE_16BIT:
15230 switch (cbValue)
15231 {
15232 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15233 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15234 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15235 default:
15236 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15237 }
15238 break;
15239
15240 case IEMMODE_32BIT:
15241 switch (cbValue)
15242 {
15243 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15244 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15245 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15246 default:
15247 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15248 }
15249 break;
15250
15251 case IEMMODE_64BIT:
15252 switch (cbValue)
15253 {
15254 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15255 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15256 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15257 default:
15258 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15259 }
15260 break;
15261
15262 default:
15263 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15264 }
15265 }
15266
15267 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15268}
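
/*
 * Illustrative sketch (not part of the build): the read (INS) interface is used
 * like IEMExecStringIoWrite above, except there is no iEffSeg parameter since
 * INS always stores through ES:eDI and takes no segment override.  The literal
 * values below are hypothetical examples.
 *
 *     // INSW, 16-bit addressing, no REP prefix, one byte instruction (6d), I/O access already checked.
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoRead(pVCpu, 2, IEMMODE_16BIT,
 *                                                 false, 1, true);
 */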
15269
15270
15271/**
15272 * Interface for rawmode to execute an OUT instruction.
15273 *
15274 * @returns Strict VBox status code.
15275 * @param pVCpu The cross context virtual CPU structure.
15276 * @param cbInstr The instruction length in bytes.
15277 * @param u16Port The port to write to.
15278 * @param cbReg The register size.
15279 *
15280 * @remarks In ring-0 not all of the state needs to be synced in.
15281 */
15282VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15283{
15284 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15285 Assert(cbReg <= 4 && cbReg != 3);
15286
15287 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15288 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
15289 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15290}
15291
15292
15293/**
15294 * Interface for rawmode to execute an IN instruction.
15295 *
15296 * @returns Strict VBox status code.
15297 * @param pVCpu The cross context virtual CPU structure.
15298 * @param cbInstr The instruction length in bytes.
15299 * @param u16Port The port to read from.
15300 * @param cbReg The register size.
15301 */
15302VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
15303{
15304 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15305 Assert(cbReg <= 4 && cbReg != 3);
15306
15307 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15308 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
15309 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15310}
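
/*
 * Illustrative sketch (not part of the build): invoking the decoded IN/OUT
 * wrappers above with hypothetical example values; a real caller takes the
 * port, access size and instruction length from the already decoded
 * instruction or the exit information.
 *
 *     // OUT 80h, AL - two byte instruction (e6 80), one byte access to port 0x80.
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 2, 0x80, 1);
 *     // IN AL, DX - one byte instruction (ec); the port value passed in was read from DX.
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = IEMExecDecodedIn(pVCpu, 1, 0x64, 1);
 */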
15311
15312
15313/**
15314 * Interface for HM and EM to write to a CRx register.
15315 *
15316 * @returns Strict VBox status code.
15317 * @param pVCpu The cross context virtual CPU structure.
15318 * @param cbInstr The instruction length in bytes.
15319 * @param iCrReg The control register number (destination).
15320 * @param iGReg The general purpose register number (source).
15321 *
15322 * @remarks In ring-0 not all of the state needs to be synced in.
15323 */
15324VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15325{
15326 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15327 Assert(iCrReg < 16);
15328 Assert(iGReg < 16);
15329
15330 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15331 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15332 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15333}
15334
15335
15336/**
15337 * Interface for HM and EM to read from a CRx register.
15338 *
15339 * @returns Strict VBox status code.
15340 * @param pVCpu The cross context virtual CPU structure.
15341 * @param cbInstr The instruction length in bytes.
15342 * @param iGReg The general purpose register number (destination).
15343 * @param iCrReg The control register number (source).
15344 *
15345 * @remarks In ring-0 not all of the state needs to be synced in.
15346 */
15347VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15348{
15349 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15350 Assert(iCrReg < 16);
15351 Assert(iGReg < 16);
15352
15353 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15354 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15355 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15356}
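
/*
 * Illustrative sketch (not part of the build): both CRx accessors above take
 * plain register indexes (0..15).  The encodings and values below are
 * hypothetical examples.
 *
 *     // mov cr0, rax : 0f 22 c0, three bytes; control register 0, general register 0 (RAX).
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, 3, 0, X86_GREG_xAX);
 *     // mov rcx, cr4 : 0f 20 e1, three bytes.
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = IEMExecDecodedMovCRxRead(pVCpu, 3, X86_GREG_xCX, 4);
 */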
15357
15358
15359/**
15360 * Interface for HM and EM to clear the CR0[TS] bit.
15361 *
15362 * @returns Strict VBox status code.
15363 * @param pVCpu The cross context virtual CPU structure.
15364 * @param cbInstr The instruction length in bytes.
15365 *
15366 * @remarks In ring-0 not all of the state needs to be synced in.
15367 */
15368VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15369{
15370 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15371
15372 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15373 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15374 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15375}
15376
15377
15378/**
15379 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15380 *
15381 * @returns Strict VBox status code.
15382 * @param pVCpu The cross context virtual CPU structure.
15383 * @param cbInstr The instruction length in bytes.
15384 * @param uValue The machine status word to load into CR0 (only bits 0 thru 3 are used).
15385 *
15386 * @remarks In ring-0 not all of the state needs to be synced in.
15387 */
15388VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15389{
15390 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15391
15392 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15393 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15394 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15395}
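
/*
 * Illustrative sketch (not part of the build): LMSW (0f 01 /6, three bytes for
 * the register form) only updates CR0 bits 0 thru 3 and cannot clear PE, e.g.
 * setting MP and TS:
 *
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedLmsw(pVCpu, 3, X86_CR0_MP | X86_CR0_TS);
 */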
15396
15397
15398/**
15399 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15400 *
15401 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15402 *
15403 * @returns Strict VBox status code.
15404 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15405 * @param cbInstr The instruction length in bytes.
15406 * @remarks In ring-0 not all of the state needs to be synced in.
15407 * @thread EMT(pVCpu)
15408 */
15409VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15410{
15411 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15412
15413 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15414 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15415 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15416}
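
/*
 * Illustrative sketch (not part of the build): XSETBV takes its XCR index from
 * the guest's ECX and the value from EDX:EAX, so a caller only supplies the
 * instruction length (0f 01 d1, three bytes):
 *
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, 3);
 */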
15417
15418
15419/**
15420 * Checks if IEM is in the process of delivering an event (interrupt or
15421 * exception).
15422 *
15423 * @returns true if we're in the process of raising an interrupt or exception,
15424 * false otherwise.
15425 * @param pVCpu The cross context virtual CPU structure.
15426 * @param puVector Where to store the vector associated with the
15427 * currently delivered event, optional.
15428 * @param pfFlags Where to store the event delivery flags (see
15429 * IEM_XCPT_FLAGS_XXX), optional.
15430 * @param puErr Where to store the error code associated with the
15431 * event, optional.
15432 * @param puCr2 Where to store the CR2 associated with the event,
15433 * optional.
15434 * @remarks The caller should check the flags to determine if the error code and
15435 * CR2 are valid for the event.
15436 */
15437VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15438{
15439 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15440 if (fRaisingXcpt)
15441 {
15442 if (puVector)
15443 *puVector = pVCpu->iem.s.uCurXcpt;
15444 if (pfFlags)
15445 *pfFlags = pVCpu->iem.s.fCurXcpt;
15446 if (puErr)
15447 *puErr = pVCpu->iem.s.uCurXcptErr;
15448 if (puCr2)
15449 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15450 }
15451 return fRaisingXcpt;
15452}
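
/*
 * Illustrative usage sketch (not part of the build): querying the in-flight
 * event and checking which of the optional outputs are valid.  The flag names
 * below are the IEM_XCPT_FLAGS_XXX values assumed to indicate error-code and
 * CR2 validity.
 *
 *     uint8_t  uVector;
 *     uint32_t fFlags, uErrCode;
 *     uint64_t uCr2;
 *     if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErrCode, &uCr2))
 *     {
 *         bool const fErrCodeValid = RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR);
 *         bool const fCr2Valid     = RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2);
 *         // ... use uVector and, when the flags say so, uErrCode and uCr2 ...
 *     }
 */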
15453
15454
15455#ifdef VBOX_WITH_NESTED_HWVIRT
15456/**
15457 * Interface for HM and EM to emulate the CLGI instruction.
15458 *
15459 * @returns Strict VBox status code.
15460 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15461 * @param cbInstr The instruction length in bytes.
15462 * @thread EMT(pVCpu)
15463 */
15464VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15465{
15466 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15467
15468 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15469 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15470 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15471}
15472
15473
15474/**
15475 * Interface for HM and EM to emulate the STGI instruction.
15476 *
15477 * @returns Strict VBox status code.
15478 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15479 * @param cbInstr The instruction length in bytes.
15480 * @thread EMT(pVCpu)
15481 */
15482VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15483{
15484 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15485
15486 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15487 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15488 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15489}
15490
15491
15492/**
15493 * Interface for HM and EM to emulate the VMLOAD instruction.
15494 *
15495 * @returns Strict VBox status code.
15496 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15497 * @param cbInstr The instruction length in bytes.
15498 * @thread EMT(pVCpu)
15499 */
15500VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15501{
15502 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15503
15504 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15505 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15506 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15507}
15508
15509
15510/**
15511 * Interface for HM and EM to emulate the VMSAVE instruction.
15512 *
15513 * @returns Strict VBox status code.
15514 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15515 * @param cbInstr The instruction length in bytes.
15516 * @thread EMT(pVCpu)
15517 */
15518VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15519{
15520 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15521
15522 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15523 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15524 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15525}
15526
15527
15528/**
15529 * Interface for HM and EM to emulate the INVLPGA instruction.
15530 *
15531 * @returns Strict VBox status code.
15532 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15533 * @param cbInstr The instruction length in bytes.
15534 * @thread EMT(pVCpu)
15535 */
15536VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15537{
15538 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15539
15540 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15541 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15542 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15543}
15544#endif /* VBOX_WITH_NESTED_HWVIRT */
15545
15546#ifdef IN_RING3
15547
15548/**
15549 * Handles the unlikely and probably fatal merge cases.
15550 *
15551 * @returns Merged status code.
15552 * @param rcStrict Current EM status code.
15553 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15554 * with @a rcStrict.
15555 * @param iMemMap The memory mapping index. For error reporting only.
15556 * @param pVCpu The cross context virtual CPU structure of the calling
15557 * thread, for error reporting only.
15558 */
15559DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15560 unsigned iMemMap, PVMCPU pVCpu)
15561{
15562 if (RT_FAILURE_NP(rcStrict))
15563 return rcStrict;
15564
15565 if (RT_FAILURE_NP(rcStrictCommit))
15566 return rcStrictCommit;
15567
15568 if (rcStrict == rcStrictCommit)
15569 return rcStrictCommit;
15570
15571 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15572 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15573 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15574 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15575 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15576 return VERR_IOM_FF_STATUS_IPE;
15577}
15578
15579
15580/**
15581 * Helper for IOMR3ProcessForceFlag.
15582 *
15583 * @returns Merged status code.
15584 * @param rcStrict Current EM status code.
15585 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15586 * with @a rcStrict.
15587 * @param iMemMap The memory mapping index. For error reporting only.
15588 * @param pVCpu The cross context virtual CPU structure of the calling
15589 * thread, for error reporting only.
15590 */
15591DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15592{
15593 /* Simple. */
15594 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15595 return rcStrictCommit;
15596
15597 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15598 return rcStrict;
15599
15600 /* EM scheduling status codes. */
15601 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15602 && rcStrict <= VINF_EM_LAST))
15603 {
15604 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15605 && rcStrictCommit <= VINF_EM_LAST))
15606 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15607 }
15608
15609 /* Unlikely */
15610 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15611}
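
/*
 * Worked example for the merge above: EM scheduling status codes are ordered
 * so that a numerically lower value means higher priority, hence the smaller
 * of the two is kept.  E.g. an incoming VINF_EM_RESET beats a VINF_EM_HALT
 * from the commit, while a plain VINF_SUCCESS or VINF_EM_RAW_TO_R3 on the
 * incoming side simply yields whatever the commit returned.
 */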
15612
15613
15614/**
15615 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15616 *
15617 * @returns Merge between @a rcStrict and what the commit operation returned.
15618 * @param pVM The cross context VM structure.
15619 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15620 * @param rcStrict The status code returned by ring-0 or raw-mode.
15621 */
15622VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15623{
15624 /*
15625 * Reset the pending commit.
15626 */
15627 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15628 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15629 ("%#x %#x %#x\n",
15630 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15631 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15632
15633 /*
15634 * Commit the pending bounce buffers (usually just one).
15635 */
15636 unsigned cBufs = 0;
15637 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15638 while (iMemMap-- > 0)
15639 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15640 {
15641 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15642 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15643 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15644
15645 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15646 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15647 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15648
15649 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15650 {
15651 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15652 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15653 pbBuf,
15654 cbFirst,
15655 PGMACCESSORIGIN_IEM);
15656 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15657 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15658 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15659 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15660 }
15661
15662 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15663 {
15664 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15665 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15666 pbBuf + cbFirst,
15667 cbSecond,
15668 PGMACCESSORIGIN_IEM);
15669 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15670 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15671 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15672 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15673 }
15674 cBufs++;
15675 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15676 }
15677
15678 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15679 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15680 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15681 pVCpu->iem.s.cActiveMappings = 0;
15682 return rcStrict;
15683}
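
/*
 * Illustrative call-site sketch (not part of the build): the shape of the
 * force-flag check an execution loop would perform once back in ring-3; the
 * exact macro and surrounding code in EM may differ.
 *
 *     if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */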
15684
15685#endif /* IN_RING3 */
15686