VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@72494

Last change on this file since 72494 was 72494, checked in by vboxsync, 6 years ago

IEM: Dropped the IEMCPU::pCtxR3, IEMCPU::pCtxR0, and IEMCPU::pCtxRC members.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 596.1 KB
1/* $Id: IEMAll.cpp 72494 2018-06-10 16:16:36Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
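/* For orientation, a typical spread of these levels inside this file looks
   roughly like the following (illustrative statements only; the Log* macros
   come from VBox/log.h and are filtered by the LOG_GROUP_IEM group defined
   below):

       LogFlow(("IEMExecOne: enter\n"));                          // flow
       Log(("iemRaiseXcptOrInt: vec=%#x\n", u8Vector));           // level 1
       Log4(("decode - %04x:%08RX64 nop\n", uSelCs, uRip));       // mnemonics
       Log8(("IEM WR %RGv LB %#zx\n", GCPtrMem, cbMem));          // mem writes

   The format strings and variable names above are made up for the example. */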
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
135 * We're using macros for this so that adding and removing parameters as well as
136 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
148 * We're using macros for this so that adding and removing parameters as well as
149 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
196
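/* Usage sketch: opcode handlers are always declared through the FNIEMOP_DEF*
   macros so the calling convention and attributes stay in one place, e.g. a
   hypothetical no-operand handler (name and body made up for illustration):

       FNIEMOP_DEF(iemOp_example_nop)
       {
           /* decode prefixes / ModR/M here, then emit the operation */
           return VINF_SUCCESS;
       }

   A handler that also takes the ModR/M byte uses FNIEMOPRM_DEF(a_Name) and
   receives it as 'bRm'. */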
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
210
211/**
212 * CPU exception classes.
213 */
214typedef enum IEMXCPTCLASS
215{
216 IEMXCPTCLASS_BENIGN,
217 IEMXCPTCLASS_CONTRIBUTORY,
218 IEMXCPTCLASS_PAGE_FAULT,
219 IEMXCPTCLASS_DOUBLE_FAULT
220} IEMXCPTCLASS;
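/* These classes feed the usual x86 rules for raising an exception while another
   one is being delivered.  A simplified sketch of that decision (hypothetical
   helper; the real logic lives in the exception raising code further down and
   also covers the #DF-during-#DF / triple-fault case):

       static bool iemExampleCausesDoubleFault(IEMXCPTCLASS enmFirst, IEMXCPTCLASS enmSecond)
       {
           if (enmFirst == IEMXCPTCLASS_CONTRIBUTORY)
               return enmSecond == IEMXCPTCLASS_CONTRIBUTORY;
           if (enmFirst == IEMXCPTCLASS_PAGE_FAULT)
               return enmSecond == IEMXCPTCLASS_CONTRIBUTORY || enmSecond == IEMXCPTCLASS_PAGE_FAULT;
           return false; // benign combinations are simply delivered serially
       }
*/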
221
222
223/*********************************************************************************************************************************
224* Defined Constants And Macros *
225*********************************************************************************************************************************/
226/** @def IEM_WITH_SETJMP
227 * Enables alternative status code handling using setjmps.
228 *
229 * This adds a bit of expense via the setjmp() call since it saves all the
230 * non-volatile registers. However, it eliminates return code checks and allows
231 * for more optimal return value passing (return regs instead of stack buffer).
232 */
233#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
234# define IEM_WITH_SETJMP
235#endif
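/* Sketch of the pattern IEM_WITH_SETJMP enables (simplified; the real code
   keeps the jump buffer in the per-CPU IEM state, and the *Jmp helper name
   below merely follows the convention used later in this file):

       jmp_buf JmpBuf;
       int rcJmp = setjmp(JmpBuf);
       if (rcJmp == 0)
       {
           uint8_t bOpcode = iemOpcodeGetNextU8Jmp(pVCpu);  // raises #PF/#GP via longjmp
           // ... decode and execute without per-fetch status checks ...
       }
       else
       {
           // a *Jmp helper bailed out; rcJmp is the status code to return
       }
*/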
236
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
243
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
276
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_2.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
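/* These pair with the FNIEMOP_DEF* macros above; the decoder fetches an opcode
   byte and dispatches through a function pointer table, along the lines of
   (sketch only; IEM_OPCODE_GET_NEXT_U8 and g_apfnOneByteMap appear further
   down / in the instruction tables):

       uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
       VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
*/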
300
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in real mode.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
335 * @returns PCCPUMFEATURES
336 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
337 */
338#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
339
340/**
341 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
342 * @returns PCCPUMFEATURES
343 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
344 */
345#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
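/* Typical use of the feature accessors in the opcode handlers, e.g. refusing
   an instruction the exposed CPU profile doesn't advertise (sketch; the fSse2
   member name is taken from CPUMFEATURES):

       if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2)
           return iemRaiseUndefinedOpcode(pVCpu);
*/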
346
347/**
348 * Evaluates to true if we're presenting an Intel CPU to the guest.
349 */
350#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
351
352/**
353 * Evaluates to true if we're presenting an AMD CPU to the guest.
354 */
355#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
356
357/**
358 * Check if the address is canonical.
359 */
360#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
361
362/**
363 * Gets the effective VEX.VVVV value.
364 *
365 * The 4th bit is ignored when not in 64-bit code.
366 * @returns effective V-register value.
367 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
368 */
369#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
370 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
371
372/** @def IEM_USE_UNALIGNED_DATA_ACCESS
373 * Use unaligned accesses instead of elaborate byte assembly. */
374#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
375# define IEM_USE_UNALIGNED_DATA_ACCESS
376#endif
377
378#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
379/**
380 * Check the common SVM instruction preconditions.
381 */
382# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
383 do { \
384 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
385 { \
386 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
387 return iemRaiseUndefinedOpcode(pVCpu); \
388 } \
389 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
390 { \
391 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
392 return iemRaiseUndefinedOpcode(pVCpu); \
393 } \
394 if (pVCpu->iem.s.uCpl != 0) \
395 { \
396 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
397 return iemRaiseGeneralProtectionFault0(pVCpu); \
398 } \
399 } while (0)
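/* Usage sketch: an SVM instruction handler (VMRUN, VMLOAD, VMSAVE, ...) runs
   these common checks first and then tests its specific intercept, roughly as
   follows (the intercept/exit constant names below follow hm_svm.h
   conventions and are shown for illustration):

       IEM_SVM_INSTR_COMMON_CHECKS(pVCpu, vmload);
       if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_VMLOAD))
           IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_VMLOAD, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
*/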
400
401/**
402 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
403 */
404# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
405 do { \
406 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
407 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
408 } while (0)
409
410/**
411 * Check if SVM is enabled.
412 */
413# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
414
415/**
416 * Check if an SVM control/instruction intercept is set.
417 */
418# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
419
420/**
421 * Check if an SVM read CRx intercept is set.
422 */
423# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
424
425/**
426 * Check if an SVM write CRx intercept is set.
427 */
428# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
429
430/**
431 * Check if an SVM read DRx intercept is set.
432 */
433# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
434
435/**
436 * Check if an SVM write DRx intercept is set.
437 */
438# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
439
440/**
441 * Check if an SVM exception intercept is set.
442 */
443# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
444
445/**
446 * Get the SVM pause-filter count.
447 */
448# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (CPUMGetGuestSvmPauseFilterCount(a_pVCpu, IEM_GET_CTX(a_pVCpu)))
449
450/**
451 * Invokes the SVM \#VMEXIT handler for the nested-guest.
452 */
453# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
454 do \
455 { \
456 return iemSvmVmexit((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); \
457 } while (0)
458
459/**
460 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
461 * corresponding decode assist information.
462 */
463# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
464 do \
465 { \
466 uint64_t uExitInfo1; \
467 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
468 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
469 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
470 else \
471 uExitInfo1 = 0; \
472 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
473 } while (0)
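/* And the matching pattern for control register accesses, e.g. from a
   hypothetical 'MOV CR0, reg' path (exit code name per hm_svm.h conventions,
   iGReg being the handler's general register index):

       if (IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(pVCpu, 0 /* CR0 */))
           IEM_RETURN_SVM_CRX_VMEXIT(pVCpu, SVM_EXIT_WRITE_CR0, IEMACCESSCRX_MOV_CRX, iGReg);
*/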
474
475#else
476# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
477# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
478# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
479# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
480# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
481# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
482# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
483# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
484# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
485# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (0)
486# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
487# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
488
489#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
490
491
492/*********************************************************************************************************************************
493* Global Variables *
494*********************************************************************************************************************************/
495extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
496
497
498/** Function table for the ADD instruction. */
499IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
500{
501 iemAImpl_add_u8, iemAImpl_add_u8_locked,
502 iemAImpl_add_u16, iemAImpl_add_u16_locked,
503 iemAImpl_add_u32, iemAImpl_add_u32_locked,
504 iemAImpl_add_u64, iemAImpl_add_u64_locked
505};
506
507/** Function table for the ADC instruction. */
508IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
509{
510 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
511 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
512 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
513 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
514};
515
516/** Function table for the SUB instruction. */
517IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
518{
519 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
520 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
521 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
522 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
523};
524
525/** Function table for the SBB instruction. */
526IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
527{
528 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
529 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
530 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
531 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
532};
533
534/** Function table for the OR instruction. */
535IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
536{
537 iemAImpl_or_u8, iemAImpl_or_u8_locked,
538 iemAImpl_or_u16, iemAImpl_or_u16_locked,
539 iemAImpl_or_u32, iemAImpl_or_u32_locked,
540 iemAImpl_or_u64, iemAImpl_or_u64_locked
541};
542
543/** Function table for the XOR instruction. */
544IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
545{
546 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
547 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
548 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
549 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
550};
551
552/** Function table for the AND instruction. */
553IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
554{
555 iemAImpl_and_u8, iemAImpl_and_u8_locked,
556 iemAImpl_and_u16, iemAImpl_and_u16_locked,
557 iemAImpl_and_u32, iemAImpl_and_u32_locked,
558 iemAImpl_and_u64, iemAImpl_and_u64_locked
559};
560
561/** Function table for the CMP instruction.
562 * @remarks Making operand order ASSUMPTIONS.
563 */
564IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
565{
566 iemAImpl_cmp_u8, NULL,
567 iemAImpl_cmp_u16, NULL,
568 iemAImpl_cmp_u32, NULL,
569 iemAImpl_cmp_u64, NULL
570};
571
572/** Function table for the TEST instruction.
573 * @remarks Making operand order ASSUMPTIONS.
574 */
575IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
576{
577 iemAImpl_test_u8, NULL,
578 iemAImpl_test_u16, NULL,
579 iemAImpl_test_u32, NULL,
580 iemAImpl_test_u64, NULL
581};
582
583/** Function table for the BT instruction. */
584IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
585{
586 NULL, NULL,
587 iemAImpl_bt_u16, NULL,
588 iemAImpl_bt_u32, NULL,
589 iemAImpl_bt_u64, NULL
590};
591
592/** Function table for the BTC instruction. */
593IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
594{
595 NULL, NULL,
596 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
597 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
598 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
599};
600
601/** Function table for the BTR instruction. */
602IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
603{
604 NULL, NULL,
605 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
606 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
607 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
608};
609
610/** Function table for the BTS instruction. */
611IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
612{
613 NULL, NULL,
614 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
615 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
616 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
617};
618
619/** Function table for the BSF instruction. */
620IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
621{
622 NULL, NULL,
623 iemAImpl_bsf_u16, NULL,
624 iemAImpl_bsf_u32, NULL,
625 iemAImpl_bsf_u64, NULL
626};
627
628/** Function table for the BSR instruction. */
629IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
630{
631 NULL, NULL,
632 iemAImpl_bsr_u16, NULL,
633 iemAImpl_bsr_u32, NULL,
634 iemAImpl_bsr_u64, NULL
635};
636
637/** Function table for the IMUL instruction. */
638IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
639{
640 NULL, NULL,
641 iemAImpl_imul_two_u16, NULL,
642 iemAImpl_imul_two_u32, NULL,
643 iemAImpl_imul_two_u64, NULL
644};
645
646/** Group 1 /r lookup table. */
647IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
648{
649 &g_iemAImpl_add,
650 &g_iemAImpl_or,
651 &g_iemAImpl_adc,
652 &g_iemAImpl_sbb,
653 &g_iemAImpl_and,
654 &g_iemAImpl_sub,
655 &g_iemAImpl_xor,
656 &g_iemAImpl_cmp
657};
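/* The table above is indexed with the ModR/M reg field, which selects the
   operation for the 0x80..0x83 group 1 opcodes.  Dispatch sketch (the
   IEM_OPCODE_GET_NEXT_U8 macro and the X86_MODRM_* constants come from the
   decoder macros and iprt/x86.h):

       uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
       PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
*/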
658
659/** Function table for the INC instruction. */
660IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
661{
662 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
663 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
664 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
665 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
666};
667
668/** Function table for the DEC instruction. */
669IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
670{
671 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
672 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
673 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
674 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
675};
676
677/** Function table for the NEG instruction. */
678IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
679{
680 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
681 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
682 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
683 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
684};
685
686/** Function table for the NOT instruction. */
687IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
688{
689 iemAImpl_not_u8, iemAImpl_not_u8_locked,
690 iemAImpl_not_u16, iemAImpl_not_u16_locked,
691 iemAImpl_not_u32, iemAImpl_not_u32_locked,
692 iemAImpl_not_u64, iemAImpl_not_u64_locked
693};
694
695
696/** Function table for the ROL instruction. */
697IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
698{
699 iemAImpl_rol_u8,
700 iemAImpl_rol_u16,
701 iemAImpl_rol_u32,
702 iemAImpl_rol_u64
703};
704
705/** Function table for the ROR instruction. */
706IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
707{
708 iemAImpl_ror_u8,
709 iemAImpl_ror_u16,
710 iemAImpl_ror_u32,
711 iemAImpl_ror_u64
712};
713
714/** Function table for the RCL instruction. */
715IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
716{
717 iemAImpl_rcl_u8,
718 iemAImpl_rcl_u16,
719 iemAImpl_rcl_u32,
720 iemAImpl_rcl_u64
721};
722
723/** Function table for the RCR instruction. */
724IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
725{
726 iemAImpl_rcr_u8,
727 iemAImpl_rcr_u16,
728 iemAImpl_rcr_u32,
729 iemAImpl_rcr_u64
730};
731
732/** Function table for the SHL instruction. */
733IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
734{
735 iemAImpl_shl_u8,
736 iemAImpl_shl_u16,
737 iemAImpl_shl_u32,
738 iemAImpl_shl_u64
739};
740
741/** Function table for the SHR instruction. */
742IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
743{
744 iemAImpl_shr_u8,
745 iemAImpl_shr_u16,
746 iemAImpl_shr_u32,
747 iemAImpl_shr_u64
748};
749
750/** Function table for the SAR instruction. */
751IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
752{
753 iemAImpl_sar_u8,
754 iemAImpl_sar_u16,
755 iemAImpl_sar_u32,
756 iemAImpl_sar_u64
757};
758
759
760/** Function table for the MUL instruction. */
761IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
762{
763 iemAImpl_mul_u8,
764 iemAImpl_mul_u16,
765 iemAImpl_mul_u32,
766 iemAImpl_mul_u64
767};
768
769/** Function table for the IMUL instruction working implicitly on rAX. */
770IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
771{
772 iemAImpl_imul_u8,
773 iemAImpl_imul_u16,
774 iemAImpl_imul_u32,
775 iemAImpl_imul_u64
776};
777
778/** Function table for the DIV instruction. */
779IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
780{
781 iemAImpl_div_u8,
782 iemAImpl_div_u16,
783 iemAImpl_div_u32,
784 iemAImpl_div_u64
785};
786
787/** Function table for the IDIV instruction. */
788IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
789{
790 iemAImpl_idiv_u8,
791 iemAImpl_idiv_u16,
792 iemAImpl_idiv_u32,
793 iemAImpl_idiv_u64
794};
795
796/** Function table for the SHLD instruction */
797IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
798{
799 iemAImpl_shld_u16,
800 iemAImpl_shld_u32,
801 iemAImpl_shld_u64,
802};
803
804/** Function table for the SHRD instruction */
805IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
806{
807 iemAImpl_shrd_u16,
808 iemAImpl_shrd_u32,
809 iemAImpl_shrd_u64,
810};
811
812
813/** Function table for the PUNPCKLBW instruction */
814IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
815/** Function table for the PUNPCKLWD instruction */
816IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
817/** Function table for the PUNPCKLDQ instruction */
818IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
819/** Function table for the PUNPCKLQDQ instruction */
820IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
821
822/** Function table for the PUNPCKHBW instruction */
823IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
824/** Function table for the PUNPCKHWD instruction */
825IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
826/** Function table for the PUNPCKHDQ instruction */
827IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
828/** Function table for the PUNPCKHQDQ instruction */
829IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
830
831/** Function table for the PXOR instruction */
832IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
833/** Function table for the PCMPEQB instruction */
834IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
835/** Function table for the PCMPEQW instruction */
836IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
837/** Function table for the PCMPEQD instruction */
838IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
839
840
841#if defined(IEM_LOG_MEMORY_WRITES)
842/** What IEM just wrote. */
843uint8_t g_abIemWrote[256];
844/** How much IEM just wrote. */
845size_t g_cbIemWrote;
846#endif
847
848
849/*********************************************************************************************************************************
850* Internal Functions *
851*********************************************************************************************************************************/
852IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
853IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
854IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
855IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
856/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
857IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
858IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
859IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
860IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
861IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
862IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
863IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
864IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
865IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
866IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
867IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
868IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
869#ifdef IEM_WITH_SETJMP
870DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
871DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
872DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
873DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
874DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
875#endif
876
877IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
878IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
879IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
880IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
881IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
882IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
883IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
884IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
885IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
886IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
887IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
888IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
889IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
890IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
891IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
892IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
893IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
894
895#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
896IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1,
897 uint64_t uExitInfo2);
898IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, PCPUMCTX pCtx, uint8_t u8Vector, uint32_t fFlags,
899 uint32_t uErr, uint64_t uCr2);
900#endif
901
902/**
903 * Sets the pass up status.
904 *
905 * @returns VINF_SUCCESS.
906 * @param pVCpu The cross context virtual CPU structure of the
907 * calling thread.
908 * @param rcPassUp The pass up status. Must be informational.
909 * VINF_SUCCESS is not allowed.
910 */
911IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
912{
913 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
914
915 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
916 if (rcOldPassUp == VINF_SUCCESS)
917 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
918 /* If both are EM scheduling codes, use EM priority rules. */
919 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
920 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
921 {
922 if (rcPassUp < rcOldPassUp)
923 {
924 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
925 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
926 }
927 else
928 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
929 }
930 /* Override EM scheduling with specific status code. */
931 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
932 {
933 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
934 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
935 }
936 /* Don't override specific status code, first come first served. */
937 else
938 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
939 return VINF_SUCCESS;
940}
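/* Callers use this to remember an informational status (e.g. from PGM or IOM)
   while letting the instruction finish; see the opcode prefetch code below for
   the real thing.  The pattern is roughly (pvDst/cbDst are placeholders):

       VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pvDst, cbDst, PGMACCESSORIGIN_IEM);
       if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
           rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);  // remembered, VINF_SUCCESS returned
*/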
941
942
943/**
944 * Calculates the CPU mode.
945 *
946 * This is mainly for updating IEMCPU::enmCpuMode.
947 *
948 * @returns CPU mode.
949 * @param pCtx The register context for the CPU.
950 */
951DECLINLINE(IEMMODE) iemCalcCpuMode(PCPUMCTX pCtx)
952{
953 if (CPUMIsGuestIn64BitCodeEx(pCtx))
954 return IEMMODE_64BIT;
955 if (pCtx->cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
956 return IEMMODE_32BIT;
957 return IEMMODE_16BIT;
958}
959
960
961/**
962 * Initializes the execution state.
963 *
964 * @param pVCpu The cross context virtual CPU structure of the
965 * calling thread.
966 * @param fBypassHandlers Whether to bypass access handlers.
967 *
968 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
969 * side-effects in strict builds.
970 */
971DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
972{
973 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
974
975 IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);
976 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
977
978#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
979 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
980 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
981 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
982 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
983 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
984 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
985 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
986 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
987#endif
988
989#ifdef VBOX_WITH_RAW_MODE_NOT_R0
990 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
991#endif
992 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
993 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pCtx);
994#ifdef VBOX_STRICT
995 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
996 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
997 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
998 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
999 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1000 pVCpu->iem.s.uRexReg = 127;
1001 pVCpu->iem.s.uRexB = 127;
1002 pVCpu->iem.s.uRexIndex = 127;
1003 pVCpu->iem.s.iEffSeg = 127;
1004 pVCpu->iem.s.idxPrefix = 127;
1005 pVCpu->iem.s.uVex3rdReg = 127;
1006 pVCpu->iem.s.uVexLength = 127;
1007 pVCpu->iem.s.fEvexStuff = 127;
1008 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1009# ifdef IEM_WITH_CODE_TLB
1010 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1011 pVCpu->iem.s.pbInstrBuf = NULL;
1012 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1013 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1014 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1015 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1016# else
1017 pVCpu->iem.s.offOpcode = 127;
1018 pVCpu->iem.s.cbOpcode = 127;
1019# endif
1020#endif
1021
1022 pVCpu->iem.s.cActiveMappings = 0;
1023 pVCpu->iem.s.iNextMapping = 0;
1024 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1025 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1026#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1027 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1028 && pCtx->cs.u64Base == 0
1029 && pCtx->cs.u32Limit == UINT32_MAX
1030 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1031 if (!pVCpu->iem.s.fInPatchCode)
1032 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1033#endif
1034}
1035
1036#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1037/**
1038 * Performs a minimal reinitialization of the execution state.
1039 *
1040 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1041 * 'world-switch' type operations on the CPU. Currently only nested
1042 * hardware-virtualization uses it.
1043 *
1044 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1045 */
1046IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1047{
1048 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1049 IEMMODE const enmMode = iemCalcCpuMode(pCtx);
1050 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1051
1052 pVCpu->iem.s.uCpl = uCpl;
1053 pVCpu->iem.s.enmCpuMode = enmMode;
1054 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1055 pVCpu->iem.s.enmEffAddrMode = enmMode;
1056 if (enmMode != IEMMODE_64BIT)
1057 {
1058 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1059 pVCpu->iem.s.enmEffOpSize = enmMode;
1060 }
1061 else
1062 {
1063 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1064 pVCpu->iem.s.enmEffOpSize = enmMode;
1065 }
1066 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1067#ifndef IEM_WITH_CODE_TLB
1068 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1069 pVCpu->iem.s.offOpcode = 0;
1070 pVCpu->iem.s.cbOpcode = 0;
1071#endif
1072 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1073}
1074#endif
1075
1076/**
1077 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1078 *
1079 * @param pVCpu The cross context virtual CPU structure of the
1080 * calling thread.
1081 */
1082DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1083{
1084 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1085#ifdef VBOX_STRICT
1086# ifdef IEM_WITH_CODE_TLB
1087 NOREF(pVCpu);
1088# else
1089 pVCpu->iem.s.cbOpcode = 0;
1090# endif
1091#else
1092 NOREF(pVCpu);
1093#endif
1094}
1095
1096
1097/**
1098 * Initializes the decoder state.
1099 *
1100 * iemReInitDecoder is mostly a copy of this function.
1101 *
1102 * @param pVCpu The cross context virtual CPU structure of the
1103 * calling thread.
1104 * @param fBypassHandlers Whether to bypass access handlers.
1105 */
1106DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1107{
1108 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1109
1110 IEM_CTX_ASSERT(IEM_GET_CTX(pVCpu), IEM_CPUMCTX_EXTRN_MUST_MASK);
1111 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1112
1113#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1114 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1115 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1116 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1117 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1118 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1119 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1120 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1121 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1122#endif
1123
1124#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1125 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1126#endif
1127 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1128 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1129 pVCpu->iem.s.enmCpuMode = enmMode;
1130 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1131 pVCpu->iem.s.enmEffAddrMode = enmMode;
1132 if (enmMode != IEMMODE_64BIT)
1133 {
1134 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1135 pVCpu->iem.s.enmEffOpSize = enmMode;
1136 }
1137 else
1138 {
1139 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1140 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1141 }
1142 pVCpu->iem.s.fPrefixes = 0;
1143 pVCpu->iem.s.uRexReg = 0;
1144 pVCpu->iem.s.uRexB = 0;
1145 pVCpu->iem.s.uRexIndex = 0;
1146 pVCpu->iem.s.idxPrefix = 0;
1147 pVCpu->iem.s.uVex3rdReg = 0;
1148 pVCpu->iem.s.uVexLength = 0;
1149 pVCpu->iem.s.fEvexStuff = 0;
1150 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1151#ifdef IEM_WITH_CODE_TLB
1152 pVCpu->iem.s.pbInstrBuf = NULL;
1153 pVCpu->iem.s.offInstrNextByte = 0;
1154 pVCpu->iem.s.offCurInstrStart = 0;
1155# ifdef VBOX_STRICT
1156 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1157 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1158 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1159# endif
1160#else
1161 pVCpu->iem.s.offOpcode = 0;
1162 pVCpu->iem.s.cbOpcode = 0;
1163#endif
1164 pVCpu->iem.s.cActiveMappings = 0;
1165 pVCpu->iem.s.iNextMapping = 0;
1166 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1167 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1168#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1169 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1170 && pCtx->cs.u64Base == 0
1171 && pCtx->cs.u32Limit == UINT32_MAX
1172 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1173 if (!pVCpu->iem.s.fInPatchCode)
1174 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1175#endif
1176
1177#ifdef DBGFTRACE_ENABLED
1178 switch (enmMode)
1179 {
1180 case IEMMODE_64BIT:
1181 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1182 break;
1183 case IEMMODE_32BIT:
1184 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1185 break;
1186 case IEMMODE_16BIT:
1187 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1188 break;
1189 }
1190#endif
1191}
1192
1193
1194/**
1195 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1196 *
1197 * This is mostly a copy of iemInitDecoder.
1198 *
1199 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1200 */
1201DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1202{
1203 PCPUMCTX const pCtx = IEM_GET_CTX(pVCpu);
1204
1205 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1206
1207#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
1209 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
1210 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->es));
1211 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ds));
1212 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->fs));
1213 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->gs));
1214 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
1215 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->tr));
1216#endif
1217
1218 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1219 IEMMODE enmMode = iemCalcCpuMode(pCtx);
1220 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1221 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1222 pVCpu->iem.s.enmEffAddrMode = enmMode;
1223 if (enmMode != IEMMODE_64BIT)
1224 {
1225 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1226 pVCpu->iem.s.enmEffOpSize = enmMode;
1227 }
1228 else
1229 {
1230 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1231 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1232 }
1233 pVCpu->iem.s.fPrefixes = 0;
1234 pVCpu->iem.s.uRexReg = 0;
1235 pVCpu->iem.s.uRexB = 0;
1236 pVCpu->iem.s.uRexIndex = 0;
1237 pVCpu->iem.s.idxPrefix = 0;
1238 pVCpu->iem.s.uVex3rdReg = 0;
1239 pVCpu->iem.s.uVexLength = 0;
1240 pVCpu->iem.s.fEvexStuff = 0;
1241 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1242#ifdef IEM_WITH_CODE_TLB
1243 if (pVCpu->iem.s.pbInstrBuf)
1244 {
1245 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pCtx->rip : pCtx->eip + (uint32_t)pCtx->cs.u64Base)
1246 - pVCpu->iem.s.uInstrBufPc;
1247 if (off < pVCpu->iem.s.cbInstrBufTotal)
1248 {
1249 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1250 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1251 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1252 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1253 else
1254 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1255 }
1256 else
1257 {
1258 pVCpu->iem.s.pbInstrBuf = NULL;
1259 pVCpu->iem.s.offInstrNextByte = 0;
1260 pVCpu->iem.s.offCurInstrStart = 0;
1261 pVCpu->iem.s.cbInstrBuf = 0;
1262 pVCpu->iem.s.cbInstrBufTotal = 0;
1263 }
1264 }
1265 else
1266 {
1267 pVCpu->iem.s.offInstrNextByte = 0;
1268 pVCpu->iem.s.offCurInstrStart = 0;
1269 pVCpu->iem.s.cbInstrBuf = 0;
1270 pVCpu->iem.s.cbInstrBufTotal = 0;
1271 }
1272#else
1273 pVCpu->iem.s.cbOpcode = 0;
1274 pVCpu->iem.s.offOpcode = 0;
1275#endif
1276 Assert(pVCpu->iem.s.cActiveMappings == 0);
1277 pVCpu->iem.s.iNextMapping = 0;
1278 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1279 Assert(pVCpu->iem.s.fBypassHandlers == false);
1280#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1281 if (!pVCpu->iem.s.fInPatchCode)
1282 { /* likely */ }
1283 else
1284 {
1285 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1286 && pCtx->cs.u64Base == 0
1287 && pCtx->cs.u32Limit == UINT32_MAX
1288 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip);
1289 if (!pVCpu->iem.s.fInPatchCode)
1290 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1291 }
1292#endif
1293
1294#ifdef DBGFTRACE_ENABLED
1295 switch (enmMode)
1296 {
1297 case IEMMODE_64BIT:
1298 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pCtx->rip);
1299 break;
1300 case IEMMODE_32BIT:
1301 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1302 break;
1303 case IEMMODE_16BIT:
1304 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pCtx->cs.Sel, pCtx->eip);
1305 break;
1306 }
1307#endif
1308}
1309
1310
1311
1312/**
1313 * Prefetches opcodes when starting execution for the first time.
1314 *
1315 * @returns Strict VBox status code.
1316 * @param pVCpu The cross context virtual CPU structure of the
1317 * calling thread.
1318 * @param fBypassHandlers Whether to bypass access handlers.
1319 */
1320IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1321{
1322 iemInitDecoder(pVCpu, fBypassHandlers);
1323
1324#ifdef IEM_WITH_CODE_TLB
1325 /** @todo Do ITLB lookup here. */
1326
1327#else /* !IEM_WITH_CODE_TLB */
1328
1329 /*
1330 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1331 *
1332 * First translate CS:rIP to a physical address.
1333 */
1334 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1335 uint32_t cbToTryRead;
1336 RTGCPTR GCPtrPC;
1337 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1338 {
1339 cbToTryRead = PAGE_SIZE;
1340 GCPtrPC = pCtx->rip;
1341 if (IEM_IS_CANONICAL(GCPtrPC))
1342 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1343 else
1344 return iemRaiseGeneralProtectionFault0(pVCpu);
1345 }
1346 else
1347 {
1348 uint32_t GCPtrPC32 = pCtx->eip;
1349 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pCtx->cs.Sel, pCtx->rip));
1350 if (GCPtrPC32 <= pCtx->cs.u32Limit)
1351 cbToTryRead = pCtx->cs.u32Limit - GCPtrPC32 + 1;
1352 else
1353 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1354 if (cbToTryRead) { /* likely */ }
1355 else /* overflowed */
1356 {
1357 Assert(GCPtrPC32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1358 cbToTryRead = UINT32_MAX;
1359 }
1360 GCPtrPC = (uint32_t)pCtx->cs.u64Base + GCPtrPC32;
1361 Assert(GCPtrPC <= UINT32_MAX);
1362 }
1363
1364# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1365 /* Allow interpretation of patch manager code blocks since they can for
1366 instance throw #PFs for perfectly good reasons. */
1367 if (pVCpu->iem.s.fInPatchCode)
1368 {
1369 size_t cbRead = 0;
1370 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1371 AssertRCReturn(rc, rc);
1372 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1373 return VINF_SUCCESS;
1374 }
1375# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1376
1377 RTGCPHYS GCPhys;
1378 uint64_t fFlags;
1379 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1380 if (RT_SUCCESS(rc)) { /* probable */ }
1381 else
1382 {
1383 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1384 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1385 }
1386 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1387 else
1388 {
1389 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1390 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1391 }
1392 if (!(fFlags & X86_PTE_PAE_NX) || !(pCtx->msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1393 else
1394 {
1395 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1396 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1397 }
1398 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1399 /** @todo Check reserved bits and such stuff. PGM is better at doing
1400 * that, so do it when implementing the guest virtual address
1401 * TLB... */
1402
1403 /*
1404 * Read the bytes at this address.
1405 */
1406 PVM pVM = pVCpu->CTX_SUFF(pVM);
1407# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1408 size_t cbActual;
1409 if ( PATMIsEnabled(pVM)
1410 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1411 {
1412 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1413 Assert(cbActual > 0);
1414 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1415 }
1416 else
1417# endif
1418 {
1419 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1420 if (cbToTryRead > cbLeftOnPage)
1421 cbToTryRead = cbLeftOnPage;
1422 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1423 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1424
1425 if (!pVCpu->iem.s.fBypassHandlers)
1426 {
1427 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1428 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1429 { /* likely */ }
1430 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1431 {
1432 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1433 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1434 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1435 }
1436 else
1437 {
1438 Log((RT_SUCCESS(rcStrict)
1439 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1440 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1441 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1442 return rcStrict;
1443 }
1444 }
1445 else
1446 {
1447 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1448 if (RT_SUCCESS(rc))
1449 { /* likely */ }
1450 else
1451 {
1452 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1453 GCPtrPC, GCPhys, cbToTryRead, rc));
1454 return rc;
1455 }
1456 }
1457 pVCpu->iem.s.cbOpcode = cbToTryRead;
1458 }
1459#endif /* !IEM_WITH_CODE_TLB */
1460 return VINF_SUCCESS;
1461}
1462
1463
1464/**
1465 * Invalidates the IEM TLBs.
1466 *
1467 * This is called internally as well as by PGM when moving GC mappings.
1468 *
1469 *
1470 * @param pVCpu The cross context virtual CPU structure of the calling
1471 * thread.
1472 * @param fVmm Set when PGM calls us with a remapping.
1473 */
1474VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1475{
1476#ifdef IEM_WITH_CODE_TLB
1477 pVCpu->iem.s.cbInstrBufTotal = 0;
1478 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1479 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1480 { /* very likely */ }
1481 else
1482 {
1483 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1484 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1485 while (i-- > 0)
1486 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1487 }
1488#endif
1489
1490#ifdef IEM_WITH_DATA_TLB
1491 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1492 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1493 { /* very likely */ }
1494 else
1495 {
1496 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1497 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1498 while (i-- > 0)
1499 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1500 }
1501#endif
1502 NOREF(pVCpu); NOREF(fVmm);
1503}
1504
1505
1506/**
1507 * Invalidates a page in the TLBs.
1508 *
1509 * @param pVCpu The cross context virtual CPU structure of the calling
1510 * thread.
1511 * @param GCPtr The address of the page to invalidate.
1512 */
1513VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1514{
1515#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1516 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1517 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1518 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1519 uintptr_t idx = (uint8_t)GCPtr;
1520
1521# ifdef IEM_WITH_CODE_TLB
1522 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1523 {
1524 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1525 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1526 pVCpu->iem.s.cbInstrBufTotal = 0;
1527 }
1528# endif
1529
1530# ifdef IEM_WITH_DATA_TLB
1531 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1532 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1533# endif
1534#else
1535 NOREF(pVCpu); NOREF(GCPtr);
1536#endif
1537}
1538
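/* A short worked example of the tag/index math in IEMTlbInvalidatePage above
 * (the numbers are illustrative, not taken from a real run):
 *
 *   GCPtr                          = 0x00007fff12345000
 *   GCPtr >> X86_PAGE_SHIFT        = 0x00007fff12345        (page number)
 *   idx = (uint8_t)0x00007fff12345 = 0x45                   (low 8 bits pick 1 of 256 entries)
 *   tag to match                   = 0x00007fff12345 | uTlbRevision
 *
 * Only the single entry selected by the low 8 page-number bits is checked, so a
 * page invalidation never has to sweep the whole array.
 */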
1539
1540/**
1541 * Invalidates the host physical aspects of the IEM TLBs.
1542 *
1543 * This is called internally as well as by PGM when moving GC mappings.
1544 *
1545 * @param pVCpu The cross context virtual CPU structure of the calling
1546 * thread.
1547 */
1548VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1549{
1550#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1551 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1552
1553# ifdef IEM_WITH_CODE_TLB
1554 pVCpu->iem.s.cbInstrBufTotal = 0;
1555# endif
1556 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1557 if (uTlbPhysRev != 0)
1558 {
1559 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1560 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1561 }
1562 else
1563 {
1564 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1565 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1566
1567 unsigned i;
1568# ifdef IEM_WITH_CODE_TLB
1569 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1570 while (i-- > 0)
1571 {
1572 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1573 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1574 }
1575# endif
1576# ifdef IEM_WITH_DATA_TLB
1577 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1578 while (i-- > 0)
1579 {
1580 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1581 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1582 }
1583# endif
1584 }
1585#else
1586 NOREF(pVCpu);
1587#endif
1588}
1589
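/* The physical-revision check works like the virtual one above, except the revision
 * lives in the upper bits of fFlagsAndPhysRev rather than in the tag.  A hedged
 * sketch of the consumer-side test (the real use is in iemOpcodeFetchBytesJmp below):
 *
 *   // The entry's cached host mapping is only trusted while its embedded revision
 *   // matches the TLB's current physical revision:
 *   if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
 *   {   // pTlbe->pbMappingR3 and the GCPhys-derived flags are still valid
 *   }
 *   else
 *   {   // refresh via PGMPhysIemGCPhys2PtrNoLock(), as the code below does
 *   }
 */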
1590
1591/**
1592 * Invalidates the host physical aspects of the IEM TLBs of all CPUs.
1593 *
1594 * This is called internally as well as by PGM when moving GC mappings.
1595 *
1596 * @param pVM The cross context VM structure.
1597 *
1598 * @remarks Caller holds the PGM lock.
1599 */
1600VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1601{
1602 RT_NOREF_PV(pVM);
1603}
1604
1605#ifdef IEM_WITH_CODE_TLB
1606
1607/**
1608 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1609 * longjmp-ing on failure.
1610 *
1611 * We end up here for a number of reasons:
1612 * - pbInstrBuf isn't yet initialized.
1613 * - Advancing beyond the buffer boundary (e.g. cross page).
1614 * - Advancing beyond the CS segment limit.
1615 * - Fetching from non-mappable page (e.g. MMIO).
1616 *
1617 * @param pVCpu The cross context virtual CPU structure of the
1618 * calling thread.
1619 * @param pvDst Where to return the bytes.
1620 * @param cbDst Number of bytes to read.
1621 *
1622 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1623 */
1624IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1625{
1626#ifdef IN_RING3
1627 for (;;)
1628 {
1629 Assert(cbDst <= 8);
1630 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1631
1632 /*
1633 * We might have a partial buffer match, deal with that first to make the
1634 * rest simpler. This is the first part of the cross page/buffer case.
1635 */
1636 if (pVCpu->iem.s.pbInstrBuf != NULL)
1637 {
1638 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1639 {
1640 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1641 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1642 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1643
1644 cbDst -= cbCopy;
1645 pvDst = (uint8_t *)pvDst + cbCopy;
1646 offBuf += cbCopy;
1647 pVCpu->iem.s.offInstrNextByte += offBuf;
1648 }
1649 }
1650
1651 /*
1652 * Check segment limit, figuring how much we're allowed to access at this point.
1653 *
1654 * We will fault immediately if RIP is past the segment limit / in non-canonical
1655 * territory. If we do continue, there are one or more bytes to read before we
1656 * end up in trouble; those must be read before we fault.
1657 */
1658 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1659 RTGCPTR GCPtrFirst;
1660 uint32_t cbMaxRead;
1661 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1662 {
1663 GCPtrFirst = pCtx->rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1664 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1665 { /* likely */ }
1666 else
1667 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1668 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1669 }
1670 else
1671 {
1672 GCPtrFirst = pCtx->eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1673 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1674 if (RT_LIKELY((uint32_t)GCPtrFirst <= pCtx->cs.u32Limit))
1675 { /* likely */ }
1676 else
1677 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1678 cbMaxRead = pCtx->cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1679 if (cbMaxRead != 0)
1680 { /* likely */ }
1681 else
1682 {
1683 /* Overflowed because address is 0 and limit is max. */
1684 Assert(GCPtrFirst == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1685 cbMaxRead = X86_PAGE_SIZE;
1686 }
1687 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pCtx->cs.u64Base;
1688 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1689 if (cbMaxRead2 < cbMaxRead)
1690 cbMaxRead = cbMaxRead2;
1691 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1692 }
1693
1694 /*
1695 * Get the TLB entry for this piece of code.
1696 */
1697 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1698 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1699 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1700 if (pTlbe->uTag == uTag)
1701 {
1702 /* likely when executing lots of code, otherwise unlikely */
1703# ifdef VBOX_WITH_STATISTICS
1704 pVCpu->iem.s.CodeTlb.cTlbHits++;
1705# endif
1706 }
1707 else
1708 {
1709 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1710# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1711 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pCtx->eip))
1712 {
1713 pTlbe->uTag = uTag;
1714 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1715                         | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1716 pTlbe->GCPhys = NIL_RTGCPHYS;
1717 pTlbe->pbMappingR3 = NULL;
1718 }
1719 else
1720# endif
1721 {
1722 RTGCPHYS GCPhys;
1723 uint64_t fFlags;
1724 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1725 if (RT_FAILURE(rc))
1726 {
1727 Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1728 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1729 }
1730
1731 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1732 pTlbe->uTag = uTag;
1733 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1734 pTlbe->GCPhys = GCPhys;
1735 pTlbe->pbMappingR3 = NULL;
1736 }
1737 }
1738
1739 /*
1740 * Check TLB page table level access flags.
1741 */
1742 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1743 {
1744 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1745 {
1746 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1747 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1748 }
1749 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1750 {
1751 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1752 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1753 }
1754 }
1755
1756# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1757 /*
1758 * Allow interpretation of patch manager code blocks since they can for
1759 * instance throw #PFs for perfectly good reasons.
1760 */
1761 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1762 { /* likely */ }
1763 else
1764 {
1765 /** @todo This could be optimized a little in ring-3 if we liked. */
1766 size_t cbRead = 0;
1767 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1768 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1769 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1770 return;
1771 }
1772# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1773
1774 /*
1775 * Look up the physical page info if necessary.
1776 */
1777 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1778 { /* not necessary */ }
1779 else
1780 {
1781 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1782 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1783 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1784 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1785 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1786 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1787 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1788 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1789 }
1790
1791# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1792 /*
1793 * Try to do a direct read using the pbMappingR3 pointer.
1794 */
1795 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1796 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1797 {
1798 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1799 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1800 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1801 {
1802 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1803 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1804 }
1805 else
1806 {
1807 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1808 Assert(cbInstr < cbMaxRead);
1809 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1810 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1811 }
1812 if (cbDst <= cbMaxRead)
1813 {
1814 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1815 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1816 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1817 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1818 return;
1819 }
1820 pVCpu->iem.s.pbInstrBuf = NULL;
1821
1822 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1823 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1824 }
1825 else
1826# endif
1827#if 0
1828 /*
1829 * If there is no special read handling, we can read a bit more and
1830 * put it in the prefetch buffer.
1831 */
1832 if ( cbDst < cbMaxRead
1833 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1834 {
1835 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1836 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1837 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1838 { /* likely */ }
1839 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1840 {
1841 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1842                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1843 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1844             AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1845 }
1846 else
1847 {
1848 Log((RT_SUCCESS(rcStrict)
1849 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1850 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1851                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1852 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1853 }
1854 }
1855 /*
1856 * Special read handling, so only read exactly what's needed.
1857 * This is a highly unlikely scenario.
1858 */
1859 else
1860#endif
1861 {
1862 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1863 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1864 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1865 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1866 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1867 { /* likely */ }
1868 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1869 {
1870 Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1871      GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1872 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1873 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1874 }
1875 else
1876 {
1877 Log((RT_SUCCESS(rcStrict)
1878      ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1879      : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1880      GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1881 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1882 }
1883 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1884 if (cbToRead == cbDst)
1885 return;
1886 }
1887
1888 /*
1889 * More to read, loop.
1890 */
1891 cbDst -= cbMaxRead;
1892 pvDst = (uint8_t *)pvDst + cbMaxRead;
1893 }
1894#else
1895 RT_NOREF(pvDst, cbDst);
1896 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1897#endif
1898}
1899
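/* A minimal sketch of the cbMaxRead clamping performed in iemOpcodeFetchBytesJmp:
 * first how many bytes the CS limit still allows, then how many fit on the current
 * page.  The helper name is hypothetical, and it ignores the limit-overflow special
 * case (offset 0 with limit UINT32_MAX) that the real code handles explicitly.
 *
 *   static uint32_t myCalcMaxInstrFetch(uint32_t offSeg, uint32_t cbLimit, uint64_t GCPtrLinear)
 *   {
 *       uint32_t cbToLimit = cbLimit - offSeg + 1;                                    // bytes up to and incl. the limit
 *       uint32_t cbOnPage  = X86_PAGE_SIZE - ((uint32_t)GCPtrLinear & X86_PAGE_OFFSET_MASK);
 *       return RT_MIN(cbToLimit, cbOnPage);                                           // the page boundary wins if closer
 *   }
 *
 * E.g. offSeg=0x100, cbLimit=0xffff and a linear address ending in 0xffd give
 * cbToLimit=0xff00 but cbOnPage=3, so at most 3 bytes can be read before the
 * page ends and the loop has to translate the next page.
 */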
1900#else /* !IEM_WITH_CODE_TLB */
1901
1902/**
1903 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1904 * exception if it fails.
1905 *
1906 * @returns Strict VBox status code.
1907 * @param pVCpu The cross context virtual CPU structure of the
1908 * calling thread.
1909 * @param cbMin The minimum number of bytes relative to offOpcode
1910 * that must be read.
1911 */
1912IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1913{
1914 /*
1915 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1916 *
1917 * First translate CS:rIP to a physical address.
1918 */
1919 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
1920 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1921 uint32_t cbToTryRead;
1922 RTGCPTR GCPtrNext;
1923 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1924 {
1925 cbToTryRead = PAGE_SIZE;
1926 GCPtrNext = pCtx->rip + pVCpu->iem.s.cbOpcode;
1927 if (!IEM_IS_CANONICAL(GCPtrNext))
1928 return iemRaiseGeneralProtectionFault0(pVCpu);
1929 }
1930 else
1931 {
1932 uint32_t GCPtrNext32 = pCtx->eip;
1933 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1934 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1935 if (GCPtrNext32 > pCtx->cs.u32Limit)
1936 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1937 cbToTryRead = pCtx->cs.u32Limit - GCPtrNext32 + 1;
1938 if (!cbToTryRead) /* overflowed */
1939 {
1940 Assert(GCPtrNext32 == 0); Assert(pCtx->cs.u32Limit == UINT32_MAX);
1941 cbToTryRead = UINT32_MAX;
1942 /** @todo check out wrapping around the code segment. */
1943 }
1944 if (cbToTryRead < cbMin - cbLeft)
1945 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1946 GCPtrNext = (uint32_t)pCtx->cs.u64Base + GCPtrNext32;
1947 }
1948
1949 /* Only read up to the end of the page, and make sure we don't read more
1950 than the opcode buffer can hold. */
1951 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1952 if (cbToTryRead > cbLeftOnPage)
1953 cbToTryRead = cbLeftOnPage;
1954 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1955 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1956/** @todo r=bird: Convert assertion into undefined opcode exception? */
1957 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1958
1959# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1960 /* Allow interpretation of patch manager code blocks since they can for
1961 instance throw #PFs for perfectly good reasons. */
1962 if (pVCpu->iem.s.fInPatchCode)
1963 {
1964 size_t cbRead = 0;
1965 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1966 AssertRCReturn(rc, rc);
1967 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1968 return VINF_SUCCESS;
1969 }
1970# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1971
1972 RTGCPHYS GCPhys;
1973 uint64_t fFlags;
1974 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1975 if (RT_FAILURE(rc))
1976 {
1977 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1978 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1979 }
1980 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1981 {
1982 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1983 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1984 }
1985 if ((fFlags & X86_PTE_PAE_NX) && (pCtx->msrEFER & MSR_K6_EFER_NXE))
1986 {
1987 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1988 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1989 }
1990 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1991 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1992 /** @todo Check reserved bits and such stuff. PGM is better at doing
1993 * that, so do it when implementing the guest virtual address
1994 * TLB... */
1995
1996 /*
1997 * Read the bytes at this address.
1998 *
1999 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2000 * and since PATM should only patch the start of an instruction there
2001 * should be no need to check again here.
2002 */
2003 if (!pVCpu->iem.s.fBypassHandlers)
2004 {
2005 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2006 cbToTryRead, PGMACCESSORIGIN_IEM);
2007 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2008 { /* likely */ }
2009 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2010 {
2011 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2012              GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2013 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2014 }
2015 else
2016 {
2017 Log((RT_SUCCESS(rcStrict)
2018 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2019 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2020              GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2021 return rcStrict;
2022 }
2023 }
2024 else
2025 {
2026 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2027 if (RT_SUCCESS(rc))
2028 { /* likely */ }
2029 else
2030 {
2031 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2032 return rc;
2033 }
2034 }
2035 pVCpu->iem.s.cbOpcode += cbToTryRead;
2036 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2037
2038 return VINF_SUCCESS;
2039}
2040
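/* Worked example of the page clamping in iemOpcodeFetchMoreBytes (illustrative
 * numbers): with GCPtrNext ending in 0xffe and 4 more bytes wanted,
 *
 *   cbLeftOnPage = PAGE_SIZE - 0xffe = 2
 *
 * so only 2 bytes are appended to abOpcode now; the decoder's next slow-path
 * fetch re-enters this function with the advanced cbOpcode, lands on the
 * following page, translates it separately and reads the remaining bytes.
 */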
2041#endif /* !IEM_WITH_CODE_TLB */
2042#ifndef IEM_WITH_SETJMP
2043
2044/**
2045 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2046 *
2047 * @returns Strict VBox status code.
2048 * @param pVCpu The cross context virtual CPU structure of the
2049 * calling thread.
2050 * @param pb Where to return the opcode byte.
2051 */
2052DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2053{
2054 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2055 if (rcStrict == VINF_SUCCESS)
2056 {
2057 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2058 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2059 pVCpu->iem.s.offOpcode = offOpcode + 1;
2060 }
2061 else
2062 *pb = 0;
2063 return rcStrict;
2064}
2065
2066
2067/**
2068 * Fetches the next opcode byte.
2069 *
2070 * @returns Strict VBox status code.
2071 * @param pVCpu The cross context virtual CPU structure of the
2072 * calling thread.
2073 * @param pu8 Where to return the opcode byte.
2074 */
2075DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2076{
2077 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2078 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2079 {
2080 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2081 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2082 return VINF_SUCCESS;
2083 }
2084 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2085}
2086
2087#else /* IEM_WITH_SETJMP */
2088
2089/**
2090 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2091 *
2092 * @returns The opcode byte.
2093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2094 */
2095DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2096{
2097# ifdef IEM_WITH_CODE_TLB
2098 uint8_t u8;
2099 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2100 return u8;
2101# else
2102 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2103 if (rcStrict == VINF_SUCCESS)
2104 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2105 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2106# endif
2107}
2108
2109
2110/**
2111 * Fetches the next opcode byte, longjmp on error.
2112 *
2113 * @returns The opcode byte.
2114 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2115 */
2116DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2117{
2118# ifdef IEM_WITH_CODE_TLB
2119 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2120 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2121 if (RT_LIKELY( pbBuf != NULL
2122 && offBuf < pVCpu->iem.s.cbInstrBuf))
2123 {
2124 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2125 return pbBuf[offBuf];
2126 }
2127# else
2128 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2129 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2130 {
2131 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2132 return pVCpu->iem.s.abOpcode[offOpcode];
2133 }
2134# endif
2135 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2136}
2137
2138#endif /* IEM_WITH_SETJMP */
2139
2140/**
2141 * Fetches the next opcode byte, returns automatically on failure.
2142 *
2143 * @param a_pu8 Where to return the opcode byte.
2144 * @remark Implicitly references pVCpu.
2145 */
2146#ifndef IEM_WITH_SETJMP
2147# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2148 do \
2149 { \
2150 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2151 if (rcStrict2 == VINF_SUCCESS) \
2152 { /* likely */ } \
2153 else \
2154 return rcStrict2; \
2155 } while (0)
2156#else
2157# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2158#endif /* IEM_WITH_SETJMP */
2159
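/* A hedged usage sketch for IEM_OPCODE_GET_NEXT_U8 (the decoder function below is
 * hypothetical).  In the non-setjmp build the macro hides a 'return rcStrict2;',
 * so it may only be used in functions returning VBOXSTRICTRC; in the setjmp build
 * it expands to a plain assignment and errors longjmp out of the fetch helper.
 *
 *   IEM_STATIC VBOXSTRICTRC myDecodeOneByte(PVMCPU pVCpu, uint8_t *pbOut)
 *   {
 *       IEM_OPCODE_GET_NEXT_U8(pbOut);   // may return/longjmp on fetch failure
 *       return VINF_SUCCESS;
 *   }
 */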
2160
2161#ifndef IEM_WITH_SETJMP
2162/**
2163 * Fetches the next signed byte from the opcode stream.
2164 *
2165 * @returns Strict VBox status code.
2166 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2167 * @param pi8 Where to return the signed byte.
2168 */
2169DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2170{
2171 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2172}
2173#endif /* !IEM_WITH_SETJMP */
2174
2175
2176/**
2177 * Fetches the next signed byte from the opcode stream, returning automatically
2178 * on failure.
2179 *
2180 * @param a_pi8 Where to return the signed byte.
2181 * @remark Implicitly references pVCpu.
2182 */
2183#ifndef IEM_WITH_SETJMP
2184# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2185 do \
2186 { \
2187 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2188 if (rcStrict2 != VINF_SUCCESS) \
2189 return rcStrict2; \
2190 } while (0)
2191#else /* IEM_WITH_SETJMP */
2192# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2193
2194#endif /* IEM_WITH_SETJMP */
2195
2196#ifndef IEM_WITH_SETJMP
2197
2198/**
2199 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2200 *
2201 * @returns Strict VBox status code.
2202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2203 * @param pu16 Where to return the opcode word.
2204 */
2205DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2206{
2207 uint8_t u8;
2208 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2209 if (rcStrict == VINF_SUCCESS)
2210 *pu16 = (int8_t)u8;
2211 return rcStrict;
2212}
2213
2214
2215/**
2216 * Fetches the next signed byte from the opcode stream, extending it to
2217 * unsigned 16-bit.
2218 *
2219 * @returns Strict VBox status code.
2220 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2221 * @param pu16 Where to return the unsigned word.
2222 */
2223DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2224{
2225 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2226 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2227 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2228
2229 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2230 pVCpu->iem.s.offOpcode = offOpcode + 1;
2231 return VINF_SUCCESS;
2232}
2233
2234#endif /* !IEM_WITH_SETJMP */
2235
2236/**
2237 * Fetches the next signed byte from the opcode stream and sign-extends it to
2238 * a word, returning automatically on failure.
2239 *
2240 * @param a_pu16 Where to return the word.
2241 * @remark Implicitly references pVCpu.
2242 */
2243#ifndef IEM_WITH_SETJMP
2244# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2245 do \
2246 { \
2247 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2248 if (rcStrict2 != VINF_SUCCESS) \
2249 return rcStrict2; \
2250 } while (0)
2251#else
2252# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2253#endif
2254
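/* Worked example of the S8 -> U16 sign extension done above (purely illustrative
 * values):
 *
 *   abOpcode[offOpcode] = 0xFE        // a displacement byte holding -2
 *   (int8_t)0xFE        = -2
 *   *pu16               = 0xFFFE      // -2 assigned to a uint16_t
 *
 * I.e. the single cast to int8_t is what turns a raw opcode byte into a
 * sign-extended 16-bit displacement/immediate.
 */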
2255#ifndef IEM_WITH_SETJMP
2256
2257/**
2258 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2259 *
2260 * @returns Strict VBox status code.
2261 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2262 * @param pu32 Where to return the opcode dword.
2263 */
2264DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2265{
2266 uint8_t u8;
2267 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2268 if (rcStrict == VINF_SUCCESS)
2269 *pu32 = (int8_t)u8;
2270 return rcStrict;
2271}
2272
2273
2274/**
2275 * Fetches the next signed byte from the opcode stream, extending it to
2276 * unsigned 32-bit.
2277 *
2278 * @returns Strict VBox status code.
2279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2280 * @param pu32 Where to return the unsigned dword.
2281 */
2282DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2283{
2284 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2285 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2286 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2287
2288 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2289 pVCpu->iem.s.offOpcode = offOpcode + 1;
2290 return VINF_SUCCESS;
2291}
2292
2293#endif /* !IEM_WITH_SETJMP */
2294
2295/**
2296 * Fetches the next signed byte from the opcode stream and sign-extends it to
2297 * a double word, returning automatically on failure.
2298 *
2299 * @param a_pu32 Where to return the double word.
2300 * @remark Implicitly references pVCpu.
2301 */
2302#ifndef IEM_WITH_SETJMP
2303# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2304 do \
2305 { \
2306 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2307 if (rcStrict2 != VINF_SUCCESS) \
2308 return rcStrict2; \
2309 } while (0)
2310#else
2311# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2312#endif
2313
2314#ifndef IEM_WITH_SETJMP
2315
2316/**
2317 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2318 *
2319 * @returns Strict VBox status code.
2320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2321 * @param pu64 Where to return the opcode qword.
2322 */
2323DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2324{
2325 uint8_t u8;
2326 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2327 if (rcStrict == VINF_SUCCESS)
2328 *pu64 = (int8_t)u8;
2329 return rcStrict;
2330}
2331
2332
2333/**
2334 * Fetches the next signed byte from the opcode stream, extending it to
2335 * unsigned 64-bit.
2336 *
2337 * @returns Strict VBox status code.
2338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2339 * @param pu64 Where to return the unsigned qword.
2340 */
2341DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2342{
2343 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2344 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2345 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2346
2347 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2348 pVCpu->iem.s.offOpcode = offOpcode + 1;
2349 return VINF_SUCCESS;
2350}
2351
2352#endif /* !IEM_WITH_SETJMP */
2353
2354
2355/**
2356 * Fetches the next signed byte from the opcode stream and sign-extends it to
2357 * a quad word, returning automatically on failure.
2358 *
2359 * @param a_pu64 Where to return the quad word.
2360 * @remark Implicitly references pVCpu.
2361 */
2362#ifndef IEM_WITH_SETJMP
2363# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2364 do \
2365 { \
2366 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2367 if (rcStrict2 != VINF_SUCCESS) \
2368 return rcStrict2; \
2369 } while (0)
2370#else
2371# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2372#endif
2373
2374
2375#ifndef IEM_WITH_SETJMP
2376
2377/**
2378 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2379 *
2380 * @returns Strict VBox status code.
2381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2382 * @param pu16 Where to return the opcode word.
2383 */
2384DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2385{
2386 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2387 if (rcStrict == VINF_SUCCESS)
2388 {
2389 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2390# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2391 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2392# else
2393 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2394# endif
2395 pVCpu->iem.s.offOpcode = offOpcode + 2;
2396 }
2397 else
2398 *pu16 = 0;
2399 return rcStrict;
2400}
2401
2402
2403/**
2404 * Fetches the next opcode word.
2405 *
2406 * @returns Strict VBox status code.
2407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2408 * @param pu16 Where to return the opcode word.
2409 */
2410DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2411{
2412 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2413 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2414 {
2415 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2416# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2417 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2418# else
2419 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2420# endif
2421 return VINF_SUCCESS;
2422 }
2423 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2424}
2425
2426#else /* IEM_WITH_SETJMP */
2427
2428/**
2429 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2430 *
2431 * @returns The opcode word.
2432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2433 */
2434DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2435{
2436# ifdef IEM_WITH_CODE_TLB
2437 uint16_t u16;
2438 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2439 return u16;
2440# else
2441 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2442 if (rcStrict == VINF_SUCCESS)
2443 {
2444 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2445 pVCpu->iem.s.offOpcode += 2;
2446# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2447 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2448# else
2449 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2450# endif
2451 }
2452 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2453# endif
2454}
2455
2456
2457/**
2458 * Fetches the next opcode word, longjmp on error.
2459 *
2460 * @returns The opcode word.
2461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2462 */
2463DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2464{
2465# ifdef IEM_WITH_CODE_TLB
2466 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2467 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2468 if (RT_LIKELY( pbBuf != NULL
2469 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2470 {
2471 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2472# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2473 return *(uint16_t const *)&pbBuf[offBuf];
2474# else
2475 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2476# endif
2477 }
2478# else
2479 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2480 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2481 {
2482 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2483# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2484 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2485# else
2486 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2487# endif
2488 }
2489# endif
2490 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2491}
2492
2493#endif /* IEM_WITH_SETJMP */
2494
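/* Byte-order note for the word fetchers above: opcode bytes are assembled little
 * endian, matching x86 instruction encoding.  Illustrative values:
 *
 *   abOpcode[offOpcode]     = 0x34
 *   abOpcode[offOpcode + 1] = 0x12
 *   RT_MAKE_U16(0x34, 0x12) = 0x1234
 *
 * which is also what the IEM_USE_UNALIGNED_DATA_ACCESS path reads directly on a
 * little-endian host.
 */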
2495
2496/**
2497 * Fetches the next opcode word, returns automatically on failure.
2498 *
2499 * @param a_pu16 Where to return the opcode word.
2500 * @remark Implicitly references pVCpu.
2501 */
2502#ifndef IEM_WITH_SETJMP
2503# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2504 do \
2505 { \
2506 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2507 if (rcStrict2 != VINF_SUCCESS) \
2508 return rcStrict2; \
2509 } while (0)
2510#else
2511# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2512#endif
2513
2514#ifndef IEM_WITH_SETJMP
2515
2516/**
2517 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2518 *
2519 * @returns Strict VBox status code.
2520 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2521 * @param pu32 Where to return the opcode double word.
2522 */
2523DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2524{
2525 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2526 if (rcStrict == VINF_SUCCESS)
2527 {
2528 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2529 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2530 pVCpu->iem.s.offOpcode = offOpcode + 2;
2531 }
2532 else
2533 *pu32 = 0;
2534 return rcStrict;
2535}
2536
2537
2538/**
2539 * Fetches the next opcode word, zero extending it to a double word.
2540 *
2541 * @returns Strict VBox status code.
2542 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2543 * @param pu32 Where to return the opcode double word.
2544 */
2545DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2546{
2547 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2548 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2549 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2550
2551 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2552 pVCpu->iem.s.offOpcode = offOpcode + 2;
2553 return VINF_SUCCESS;
2554}
2555
2556#endif /* !IEM_WITH_SETJMP */
2557
2558
2559/**
2560 * Fetches the next opcode word and zero extends it to a double word, returns
2561 * automatically on failure.
2562 *
2563 * @param a_pu32 Where to return the opcode double word.
2564 * @remark Implicitly references pVCpu.
2565 */
2566#ifndef IEM_WITH_SETJMP
2567# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2568 do \
2569 { \
2570 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2571 if (rcStrict2 != VINF_SUCCESS) \
2572 return rcStrict2; \
2573 } while (0)
2574#else
2575# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2576#endif
2577
2578#ifndef IEM_WITH_SETJMP
2579
2580/**
2581 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2582 *
2583 * @returns Strict VBox status code.
2584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2585 * @param pu64 Where to return the opcode quad word.
2586 */
2587DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2588{
2589 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2590 if (rcStrict == VINF_SUCCESS)
2591 {
2592 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2593 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2594 pVCpu->iem.s.offOpcode = offOpcode + 2;
2595 }
2596 else
2597 *pu64 = 0;
2598 return rcStrict;
2599}
2600
2601
2602/**
2603 * Fetches the next opcode word, zero extending it to a quad word.
2604 *
2605 * @returns Strict VBox status code.
2606 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2607 * @param pu64 Where to return the opcode quad word.
2608 */
2609DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2610{
2611 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2612 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2613 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2614
2615 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2616 pVCpu->iem.s.offOpcode = offOpcode + 2;
2617 return VINF_SUCCESS;
2618}
2619
2620#endif /* !IEM_WITH_SETJMP */
2621
2622/**
2623 * Fetches the next opcode word and zero extends it to a quad word, returns
2624 * automatically on failure.
2625 *
2626 * @param a_pu64 Where to return the opcode quad word.
2627 * @remark Implicitly references pVCpu.
2628 */
2629#ifndef IEM_WITH_SETJMP
2630# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2631 do \
2632 { \
2633 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2634 if (rcStrict2 != VINF_SUCCESS) \
2635 return rcStrict2; \
2636 } while (0)
2637#else
2638# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2639#endif
2640
2641
2642#ifndef IEM_WITH_SETJMP
2643/**
2644 * Fetches the next signed word from the opcode stream.
2645 *
2646 * @returns Strict VBox status code.
2647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2648 * @param pi16 Where to return the signed word.
2649 */
2650DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2651{
2652 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2653}
2654#endif /* !IEM_WITH_SETJMP */
2655
2656
2657/**
2658 * Fetches the next signed word from the opcode stream, returning automatically
2659 * on failure.
2660 *
2661 * @param a_pi16 Where to return the signed word.
2662 * @remark Implicitly references pVCpu.
2663 */
2664#ifndef IEM_WITH_SETJMP
2665# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2666 do \
2667 { \
2668 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2669 if (rcStrict2 != VINF_SUCCESS) \
2670 return rcStrict2; \
2671 } while (0)
2672#else
2673# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2674#endif
2675
2676#ifndef IEM_WITH_SETJMP
2677
2678/**
2679 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2680 *
2681 * @returns Strict VBox status code.
2682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2683 * @param pu32 Where to return the opcode dword.
2684 */
2685DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2686{
2687 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2688 if (rcStrict == VINF_SUCCESS)
2689 {
2690 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2691# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2692 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2693# else
2694 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2695 pVCpu->iem.s.abOpcode[offOpcode + 1],
2696 pVCpu->iem.s.abOpcode[offOpcode + 2],
2697 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2698# endif
2699 pVCpu->iem.s.offOpcode = offOpcode + 4;
2700 }
2701 else
2702 *pu32 = 0;
2703 return rcStrict;
2704}
2705
2706
2707/**
2708 * Fetches the next opcode dword.
2709 *
2710 * @returns Strict VBox status code.
2711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2712 * @param pu32 Where to return the opcode double word.
2713 */
2714DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2715{
2716 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2717 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2718 {
2719 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2720# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2721 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2722# else
2723 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2724 pVCpu->iem.s.abOpcode[offOpcode + 1],
2725 pVCpu->iem.s.abOpcode[offOpcode + 2],
2726 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2727# endif
2728 return VINF_SUCCESS;
2729 }
2730 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2731}
2732
2733#else /* IEM_WITH_SETJMP */
2734
2735/**
2736 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2737 *
2738 * @returns The opcode dword.
2739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2740 */
2741DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2742{
2743# ifdef IEM_WITH_CODE_TLB
2744 uint32_t u32;
2745 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2746 return u32;
2747# else
2748 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2749 if (rcStrict == VINF_SUCCESS)
2750 {
2751 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2752 pVCpu->iem.s.offOpcode = offOpcode + 4;
2753# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2754 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2755# else
2756 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2757 pVCpu->iem.s.abOpcode[offOpcode + 1],
2758 pVCpu->iem.s.abOpcode[offOpcode + 2],
2759 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2760# endif
2761 }
2762 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2763# endif
2764}
2765
2766
2767/**
2768 * Fetches the next opcode dword, longjmp on error.
2769 *
2770 * @returns The opcode dword.
2771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2772 */
2773DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2774{
2775# ifdef IEM_WITH_CODE_TLB
2776 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2777 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2778 if (RT_LIKELY( pbBuf != NULL
2779 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2780 {
2781 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2782# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2783 return *(uint32_t const *)&pbBuf[offBuf];
2784# else
2785 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2786 pbBuf[offBuf + 1],
2787 pbBuf[offBuf + 2],
2788 pbBuf[offBuf + 3]);
2789# endif
2790 }
2791# else
2792 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2793 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2794 {
2795 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2796# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2797 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2798# else
2799 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2800 pVCpu->iem.s.abOpcode[offOpcode + 1],
2801 pVCpu->iem.s.abOpcode[offOpcode + 2],
2802 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2803# endif
2804 }
2805# endif
2806 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2807}
2808
2809#endif /* IEM_WITH_SETJMP */
2810
2811
2812/**
2813 * Fetches the next opcode dword, returns automatically on failure.
2814 *
2815 * @param a_pu32 Where to return the opcode dword.
2816 * @remark Implicitly references pVCpu.
2817 */
2818#ifndef IEM_WITH_SETJMP
2819# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2820 do \
2821 { \
2822 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2823 if (rcStrict2 != VINF_SUCCESS) \
2824 return rcStrict2; \
2825 } while (0)
2826#else
2827# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2828#endif
2829
2830#ifndef IEM_WITH_SETJMP
2831
2832/**
2833 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2834 *
2835 * @returns Strict VBox status code.
2836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2837 * @param pu64 Where to return the opcode quad word.
2838 */
2839DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2840{
2841 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2842 if (rcStrict == VINF_SUCCESS)
2843 {
2844 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2845 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2846 pVCpu->iem.s.abOpcode[offOpcode + 1],
2847 pVCpu->iem.s.abOpcode[offOpcode + 2],
2848 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2849 pVCpu->iem.s.offOpcode = offOpcode + 4;
2850 }
2851 else
2852 *pu64 = 0;
2853 return rcStrict;
2854}
2855
2856
2857/**
2858 * Fetches the next opcode dword, zero extending it to a quad word.
2859 *
2860 * @returns Strict VBox status code.
2861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2862 * @param pu64 Where to return the opcode quad word.
2863 */
2864DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2865{
2866 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2867 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2868 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2869
2870 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2871 pVCpu->iem.s.abOpcode[offOpcode + 1],
2872 pVCpu->iem.s.abOpcode[offOpcode + 2],
2873 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2874 pVCpu->iem.s.offOpcode = offOpcode + 4;
2875 return VINF_SUCCESS;
2876}
2877
2878#endif /* !IEM_WITH_SETJMP */
2879
2880
2881/**
2882 * Fetches the next opcode dword and zero extends it to a quad word, returns
2883 * automatically on failure.
2884 *
2885 * @param a_pu64 Where to return the opcode quad word.
2886 * @remark Implicitly references pVCpu.
2887 */
2888#ifndef IEM_WITH_SETJMP
2889# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2890 do \
2891 { \
2892 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2893 if (rcStrict2 != VINF_SUCCESS) \
2894 return rcStrict2; \
2895 } while (0)
2896#else
2897# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2898#endif
2899
2900
2901#ifndef IEM_WITH_SETJMP
2902/**
2903 * Fetches the next signed double word from the opcode stream.
2904 *
2905 * @returns Strict VBox status code.
2906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2907 * @param pi32 Where to return the signed double word.
2908 */
2909DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
2910{
2911 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2912}
2913#endif
2914
2915/**
2916 * Fetches the next signed double word from the opcode stream, returning
2917 * automatically on failure.
2918 *
2919 * @param a_pi32 Where to return the signed double word.
2920 * @remark Implicitly references pVCpu.
2921 */
2922#ifndef IEM_WITH_SETJMP
2923# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2924 do \
2925 { \
2926 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2927 if (rcStrict2 != VINF_SUCCESS) \
2928 return rcStrict2; \
2929 } while (0)
2930#else
2931# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
2932#endif
2933
2934#ifndef IEM_WITH_SETJMP
2935
2936/**
2937 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
2938 *
2939 * @returns Strict VBox status code.
2940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2941 * @param pu64 Where to return the opcode qword.
2942 */
2943DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2944{
2945 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2946 if (rcStrict == VINF_SUCCESS)
2947 {
2948 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2949 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2950 pVCpu->iem.s.abOpcode[offOpcode + 1],
2951 pVCpu->iem.s.abOpcode[offOpcode + 2],
2952 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2953 pVCpu->iem.s.offOpcode = offOpcode + 4;
2954 }
2955 else
2956 *pu64 = 0;
2957 return rcStrict;
2958}
2959
2960
2961/**
2962 * Fetches the next opcode dword, sign extending it into a quad word.
2963 *
2964 * @returns Strict VBox status code.
2965 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2966 * @param pu64 Where to return the opcode quad word.
2967 */
2968DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
2969{
2970 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2971 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2972 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
2973
2974 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2975 pVCpu->iem.s.abOpcode[offOpcode + 1],
2976 pVCpu->iem.s.abOpcode[offOpcode + 2],
2977 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2978 *pu64 = i32;
2979 pVCpu->iem.s.offOpcode = offOpcode + 4;
2980 return VINF_SUCCESS;
2981}
2982
2983#endif /* !IEM_WITH_SETJMP */
2984
2985
2986/**
2987 * Fetches the next opcode double word and sign extends it to a quad word,
2988 * returns automatically on failure.
2989 *
2990 * @param a_pu64 Where to return the opcode quad word.
2991 * @remark Implicitly references pVCpu.
2992 */
2993#ifndef IEM_WITH_SETJMP
2994# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
2995 do \
2996 { \
2997 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
2998 if (rcStrict2 != VINF_SUCCESS) \
2999 return rcStrict2; \
3000 } while (0)
3001#else
3002# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3003#endif
3004
3005#ifndef IEM_WITH_SETJMP
3006
3007/**
3008 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3009 *
3010 * @returns Strict VBox status code.
3011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3012 * @param pu64 Where to return the opcode qword.
3013 */
3014DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3015{
3016 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3017 if (rcStrict == VINF_SUCCESS)
3018 {
3019 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3020# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3021 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3022# else
3023 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3024 pVCpu->iem.s.abOpcode[offOpcode + 1],
3025 pVCpu->iem.s.abOpcode[offOpcode + 2],
3026 pVCpu->iem.s.abOpcode[offOpcode + 3],
3027 pVCpu->iem.s.abOpcode[offOpcode + 4],
3028 pVCpu->iem.s.abOpcode[offOpcode + 5],
3029 pVCpu->iem.s.abOpcode[offOpcode + 6],
3030 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3031# endif
3032 pVCpu->iem.s.offOpcode = offOpcode + 8;
3033 }
3034 else
3035 *pu64 = 0;
3036 return rcStrict;
3037}
3038
3039
3040/**
3041 * Fetches the next opcode qword.
3042 *
3043 * @returns Strict VBox status code.
3044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3045 * @param pu64 Where to return the opcode qword.
3046 */
3047DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3048{
3049 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3050 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3051 {
3052# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3053 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3054# else
3055 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3056 pVCpu->iem.s.abOpcode[offOpcode + 1],
3057 pVCpu->iem.s.abOpcode[offOpcode + 2],
3058 pVCpu->iem.s.abOpcode[offOpcode + 3],
3059 pVCpu->iem.s.abOpcode[offOpcode + 4],
3060 pVCpu->iem.s.abOpcode[offOpcode + 5],
3061 pVCpu->iem.s.abOpcode[offOpcode + 6],
3062 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3063# endif
3064 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3065 return VINF_SUCCESS;
3066 }
3067 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3068}
3069
3070#else /* IEM_WITH_SETJMP */
3071
3072/**
3073 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3074 *
3075 * @returns The opcode qword.
3076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3077 */
3078DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3079{
3080# ifdef IEM_WITH_CODE_TLB
3081 uint64_t u64;
3082 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3083 return u64;
3084# else
3085 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3086 if (rcStrict == VINF_SUCCESS)
3087 {
3088 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3089 pVCpu->iem.s.offOpcode = offOpcode + 8;
3090# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3091 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3092# else
3093 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3094 pVCpu->iem.s.abOpcode[offOpcode + 1],
3095 pVCpu->iem.s.abOpcode[offOpcode + 2],
3096 pVCpu->iem.s.abOpcode[offOpcode + 3],
3097 pVCpu->iem.s.abOpcode[offOpcode + 4],
3098 pVCpu->iem.s.abOpcode[offOpcode + 5],
3099 pVCpu->iem.s.abOpcode[offOpcode + 6],
3100 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3101# endif
3102 }
3103 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3104# endif
3105}
3106
3107
3108/**
3109 * Fetches the next opcode qword, longjmp on error.
3110 *
3111 * @returns The opcode qword.
3112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3113 */
3114DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3115{
3116# ifdef IEM_WITH_CODE_TLB
3117 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3118 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3119 if (RT_LIKELY( pbBuf != NULL
3120 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3121 {
3122 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3123# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3124 return *(uint64_t const *)&pbBuf[offBuf];
3125# else
3126 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3127 pbBuf[offBuf + 1],
3128 pbBuf[offBuf + 2],
3129 pbBuf[offBuf + 3],
3130 pbBuf[offBuf + 4],
3131 pbBuf[offBuf + 5],
3132 pbBuf[offBuf + 6],
3133 pbBuf[offBuf + 7]);
3134# endif
3135 }
3136# else
3137 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3138 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3139 {
3140 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3141# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3142 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3143# else
3144 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3145 pVCpu->iem.s.abOpcode[offOpcode + 1],
3146 pVCpu->iem.s.abOpcode[offOpcode + 2],
3147 pVCpu->iem.s.abOpcode[offOpcode + 3],
3148 pVCpu->iem.s.abOpcode[offOpcode + 4],
3149 pVCpu->iem.s.abOpcode[offOpcode + 5],
3150 pVCpu->iem.s.abOpcode[offOpcode + 6],
3151 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3152# endif
3153 }
3154# endif
3155 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3156}
3157
3158#endif /* IEM_WITH_SETJMP */
3159
3160/**
3161 * Fetches the next opcode quad word, returns automatically on failure.
3162 *
3163 * @param a_pu64 Where to return the opcode quad word.
3164 * @remark Implicitly references pVCpu.
3165 */
3166#ifndef IEM_WITH_SETJMP
3167# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3168 do \
3169 { \
3170 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3171 if (rcStrict2 != VINF_SUCCESS) \
3172 return rcStrict2; \
3173 } while (0)
3174#else
3175# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3176#endif
3177
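/*
 * Usage sketch (illustrative only, not lifted from the decoder tables): a
 * decoder function fetches a 64-bit immediate through the macro above, which
 * either returns the failure status (status code build) or longjmps (setjmp
 * build); pVCpu is referenced implicitly:
 *
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 *     // u64Imm now holds the next 8 opcode bytes, assembled little endian.
 */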
3178
3179/** @name Misc Worker Functions.
3180 * @{
3181 */
3182
3183/**
3184 * Gets the exception class for the specified exception vector.
3185 *
3186 * @returns The class of the specified exception.
3187 * @param uVector The exception vector.
3188 */
3189IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3190{
3191 Assert(uVector <= X86_XCPT_LAST);
3192 switch (uVector)
3193 {
3194 case X86_XCPT_DE:
3195 case X86_XCPT_TS:
3196 case X86_XCPT_NP:
3197 case X86_XCPT_SS:
3198 case X86_XCPT_GP:
3199 case X86_XCPT_SX: /* AMD only */
3200 return IEMXCPTCLASS_CONTRIBUTORY;
3201
3202 case X86_XCPT_PF:
3203 case X86_XCPT_VE: /* Intel only */
3204 return IEMXCPTCLASS_PAGE_FAULT;
3205
3206 case X86_XCPT_DF:
3207 return IEMXCPTCLASS_DOUBLE_FAULT;
3208 }
3209 return IEMXCPTCLASS_BENIGN;
3210}
3211
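/*
 * Note: iemGetXcptClass feeds the recursive-exception decision below.  Per the
 * exception class table in the Intel SDM ("Interrupt 8 - Double Fault
 * Exception"), and as implemented by IEMEvaluateRecursiveXcpt:
 *   - previous benign                              -> deliver the current exception
 *                                                     (NMI and #AC get special treatment),
 *   - contributory + contributory                  -> #DF,
 *   - page fault   + (page fault or contributory)  -> #DF,
 *   - #DF          + (contributory or page fault)  -> triple fault / shutdown.
 */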
3212
3213/**
3214 * Evaluates how to handle an exception caused during delivery of another event
3215 * (exception / interrupt).
3216 *
3217 * @returns How to handle the recursive exception.
3218 * @param pVCpu The cross context virtual CPU structure of the
3219 * calling thread.
3220 * @param fPrevFlags The flags of the previous event.
3221 * @param uPrevVector The vector of the previous event.
3222 * @param fCurFlags The flags of the current exception.
3223 * @param uCurVector The vector of the current exception.
3224 * @param pfXcptRaiseInfo Where to store additional information about the
3225 * exception condition. Optional.
3226 */
3227VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3228 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3229{
3230 /*
 3231 * Only CPU exceptions can be raised while delivering other events; software interrupt
 3232 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3233 */
3234 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3235 Assert(pVCpu); RT_NOREF(pVCpu);
3236 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3237
3238 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3239 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3240 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3241 {
3242 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3243 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3244 {
3245 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3246 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3247 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3248 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3249 {
3250 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3251 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3252 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3253 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3254 uCurVector, IEM_GET_CTX(pVCpu)->cr2));
3255 }
3256 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3257 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3258 {
3259 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3260 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3261 }
3262 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3263 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3264 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3265 {
3266 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3267 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3268 }
3269 }
3270 else
3271 {
3272 if (uPrevVector == X86_XCPT_NMI)
3273 {
3274 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3275 if (uCurVector == X86_XCPT_PF)
3276 {
3277 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3278 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3279 }
3280 }
3281 else if ( uPrevVector == X86_XCPT_AC
3282 && uCurVector == X86_XCPT_AC)
3283 {
3284 enmRaise = IEMXCPTRAISE_CPU_HANG;
3285 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3286 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3287 }
3288 }
3289 }
3290 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3291 {
3292 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3293 if (uCurVector == X86_XCPT_PF)
3294 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3295 }
3296 else
3297 {
3298 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3299 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3300 }
3301
3302 if (pfXcptRaiseInfo)
3303 *pfXcptRaiseInfo = fRaiseInfo;
3304 return enmRaise;
3305}
3306
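/*
 * Usage sketch (illustrative, not taken from an actual caller): event delivery
 * code that hits exception uCurVector while injecting uPrevVector could
 * consult the helper above like this:
 *
 *     IEMXCPTRAISEINFO fRaiseInfo;
 *     IEMXCPTRAISE    enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, IEM_XCPT_FLAGS_T_CPU_XCPT, uPrevVector,
 *                                                         IEM_XCPT_FLAGS_T_CPU_XCPT, uCurVector, &fRaiseInfo);
 *     if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
 *         ... raise #DF instead of uCurVector ...
 */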
3307
3308/**
3309 * Enters the CPU shutdown state initiated by a triple fault or other
3310 * unrecoverable conditions.
3311 *
3312 * @returns Strict VBox status code.
3313 * @param pVCpu The cross context virtual CPU structure of the
3314 * calling thread.
3315 */
3316IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3317{
3318 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3319 {
3320 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3321 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3322 }
3323
3324 RT_NOREF(pVCpu);
3325 return VINF_EM_TRIPLE_FAULT;
3326}
3327
3328
3329/**
3330 * Validates a new SS segment.
3331 *
3332 * @returns VBox strict status code.
3333 * @param pVCpu The cross context virtual CPU structure of the
3334 * calling thread.
3335 * @param pCtx The CPU context.
 3336 * @param NewSS The new SS selector.
3337 * @param uCpl The CPL to load the stack for.
3338 * @param pDesc Where to return the descriptor.
3339 */
3340IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, PCCPUMCTX pCtx, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3341{
3342 NOREF(pCtx);
3343
3344 /* Null selectors are not allowed (we're not called for dispatching
3345 interrupts with SS=0 in long mode). */
3346 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3347 {
3348 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3349 return iemRaiseTaskSwitchFault0(pVCpu);
3350 }
3351
3352 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3353 if ((NewSS & X86_SEL_RPL) != uCpl)
3354 {
3355 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3356 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3357 }
3358
3359 /*
3360 * Read the descriptor.
3361 */
3362 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3363 if (rcStrict != VINF_SUCCESS)
3364 return rcStrict;
3365
3366 /*
3367 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3368 */
3369 if (!pDesc->Legacy.Gen.u1DescType)
3370 {
3371 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3372 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3373 }
3374
3375 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3376 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3377 {
3378 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3379 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3380 }
3381 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3382 {
3383 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3384 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3385 }
3386
3387 /* Is it there? */
3388 /** @todo testcase: Is this checked before the canonical / limit check below? */
3389 if (!pDesc->Legacy.Gen.u1Present)
3390 {
3391 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3392 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3393 }
3394
3395 return VINF_SUCCESS;
3396}
3397
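/*
 * Note: the check order above follows the SS validation documented for
 * LSS / POP SS / MOV SS: null selector -> RPL vs CPL -> descriptor fetch ->
 * writable data segment -> DPL vs CPL -> present.  All failures raise #TS
 * except a non-present segment, which raises #NP.
 */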
3398
3399/**
3400 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3401 * not.
3402 *
3403 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3404 * @param a_pCtx The CPU context.
3405 */
3406#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3407# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) ( CPUMRawGetEFlags(a_pVCpu) )
3408#else
3409# define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) ( (a_pCtx)->eflags.u )
3410#endif
3411
3412/**
3413 * Updates the EFLAGS in the correct manner wrt. PATM.
3414 *
3415 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3416 * @param a_pCtx The CPU context.
3417 * @param a_fEfl The new EFLAGS.
3418 */
3419#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3420# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3421#else
3422# define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) do { (a_pCtx)->eflags.u = (a_fEfl); } while (0)
3423#endif
3424
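/*
 * Usage sketch (mirrors the exception dispatch code further down): EFLAGS is
 * read, masked and written back through these wrappers so the raw-mode/PATM
 * view stays consistent:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
 *     fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
 *     IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
 */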
3425
3426/** @} */
3427
3428/** @name Raising Exceptions.
3429 *
3430 * @{
3431 */
3432
3433
3434/**
3435 * Loads the specified stack far pointer from the TSS.
3436 *
3437 * @returns VBox strict status code.
3438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3439 * @param pCtx The CPU context.
3440 * @param uCpl The CPL to load the stack for.
3441 * @param pSelSS Where to return the new stack segment.
3442 * @param puEsp Where to return the new stack pointer.
3443 */
3444IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl,
3445 PRTSEL pSelSS, uint32_t *puEsp)
3446{
3447 VBOXSTRICTRC rcStrict;
3448 Assert(uCpl < 4);
3449
3450 IEM_CTX_IMPORT_RET(pVCpu, (PCPUMCTX)pCtx, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3451 switch (pCtx->tr.Attr.n.u4Type)
3452 {
3453 /*
3454 * 16-bit TSS (X86TSS16).
3455 */
3456 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3457 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3458 {
3459 uint32_t off = uCpl * 4 + 2;
3460 if (off + 4 <= pCtx->tr.u32Limit)
3461 {
3462 /** @todo check actual access pattern here. */
3463 uint32_t u32Tmp = 0; /* gcc maybe... */
3464 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3465 if (rcStrict == VINF_SUCCESS)
3466 {
3467 *puEsp = RT_LOWORD(u32Tmp);
3468 *pSelSS = RT_HIWORD(u32Tmp);
3469 return VINF_SUCCESS;
3470 }
3471 }
3472 else
3473 {
3474 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3475 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3476 }
3477 break;
3478 }
3479
3480 /*
3481 * 32-bit TSS (X86TSS32).
3482 */
3483 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3484 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3485 {
3486 uint32_t off = uCpl * 8 + 4;
3487 if (off + 7 <= pCtx->tr.u32Limit)
3488 {
3489/** @todo check actual access pattern here. */
3490 uint64_t u64Tmp;
3491 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pCtx->tr.u64Base + off);
3492 if (rcStrict == VINF_SUCCESS)
3493 {
3494 *puEsp = u64Tmp & UINT32_MAX;
3495 *pSelSS = (RTSEL)(u64Tmp >> 32);
3496 return VINF_SUCCESS;
3497 }
3498 }
3499 else
3500 {
3501 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pCtx->tr.u32Limit));
3502 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3503 }
3504 break;
3505 }
3506
3507 default:
3508 AssertFailed();
3509 rcStrict = VERR_IEM_IPE_4;
3510 break;
3511 }
3512
3513 *puEsp = 0; /* make gcc happy */
3514 *pSelSS = 0; /* make gcc happy */
3515 return rcStrict;
3516}
3517
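/*
 * Note on the offsets used above: in a 16-bit TSS the ring stacks live at
 * sp0/ss0 = 0x02/0x04, sp1/ss1 = 0x06/0x08 and sp2/ss2 = 0x0a/0x0c, hence
 * off = uCpl * 4 + 2.  In a 32-bit TSS they live at esp0/ss0 = 0x04/0x08,
 * esp1/ss1 = 0x0c/0x10 and esp2/ss2 = 0x14/0x18, hence off = uCpl * 8 + 4.
 */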
3518
3519/**
3520 * Loads the specified stack pointer from the 64-bit TSS.
3521 *
3522 * @returns VBox strict status code.
3523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3524 * @param pCtx The CPU context.
3525 * @param uCpl The CPL to load the stack for.
3526 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3527 * @param puRsp Where to return the new stack pointer.
3528 */
3529IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3530{
3531 Assert(uCpl < 4);
3532 Assert(uIst < 8);
3533 *puRsp = 0; /* make gcc happy */
3534
3535 IEM_CTX_IMPORT_RET(pVCpu, (PCPUMCTX)pCtx, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3536 AssertReturn(pCtx->tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3537
3538 uint32_t off;
3539 if (uIst)
3540 off = (uIst - 1) * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, ist1);
3541 else
3542 off = uCpl * sizeof(uint64_t) + RT_OFFSETOF(X86TSS64, rsp0);
3543 if (off + sizeof(uint64_t) > pCtx->tr.u32Limit)
3544 {
3545 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pCtx->tr.u32Limit));
3546 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3547 }
3548
3549 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pCtx->tr.u64Base + off);
3550}
3551
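/*
 * Note on the offsets used above: in the 64-bit TSS, RSP0/RSP1/RSP2 are 8-byte
 * fields starting at offset 0x04 and IST1..IST7 start at offset 0x24, which is
 * what the RT_OFFSETOF(X86TSS64, rsp0/ist1) computations resolve to.
 */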
3552
3553/**
3554 * Adjust the CPU state according to the exception being raised.
3555 *
3556 * @param pCtx The CPU context.
3557 * @param u8Vector The exception that has been raised.
3558 */
3559DECLINLINE(void) iemRaiseXcptAdjustState(PCPUMCTX pCtx, uint8_t u8Vector)
3560{
3561 switch (u8Vector)
3562 {
3563 case X86_XCPT_DB:
3564 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_DR7);
3565 pCtx->dr[7] &= ~X86_DR7_GD;
3566 break;
3567 /** @todo Read the AMD and Intel exception reference... */
3568 }
3569}
3570
3571
3572/**
3573 * Implements exceptions and interrupts for real mode.
3574 *
3575 * @returns VBox strict status code.
3576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3577 * @param pCtx The CPU context.
3578 * @param cbInstr The number of bytes to offset rIP by in the return
3579 * address.
3580 * @param u8Vector The interrupt / exception vector number.
3581 * @param fFlags The flags.
3582 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3583 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3584 */
3585IEM_STATIC VBOXSTRICTRC
3586iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3587 PCPUMCTX pCtx,
3588 uint8_t cbInstr,
3589 uint8_t u8Vector,
3590 uint32_t fFlags,
3591 uint16_t uErr,
3592 uint64_t uCr2)
3593{
3594 NOREF(uErr); NOREF(uCr2);
3595 IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3596
3597 /*
3598 * Read the IDT entry.
3599 */
3600 if (pCtx->idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3601 {
3602 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
3603 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3604 }
3605 RTFAR16 Idte;
3606 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pCtx->idtr.pIdt + UINT32_C(4) * u8Vector);
3607 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3608 {
3609 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3610 return rcStrict;
3611 }
3612
3613 /*
3614 * Push the stack frame.
3615 */
3616 uint16_t *pu16Frame;
3617 uint64_t uNewRsp;
3618 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3619 if (rcStrict != VINF_SUCCESS)
3620 return rcStrict;
3621
3622 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
3623#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3624 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3625 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3626 fEfl |= UINT16_C(0xf000);
3627#endif
3628 pu16Frame[2] = (uint16_t)fEfl;
3629 pu16Frame[1] = (uint16_t)pCtx->cs.Sel;
3630 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
3631 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3632 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3633 return rcStrict;
3634
3635 /*
3636 * Load the vector address into cs:ip and make exception specific state
3637 * adjustments.
3638 */
3639 pCtx->cs.Sel = Idte.sel;
3640 pCtx->cs.ValidSel = Idte.sel;
3641 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
3642 pCtx->cs.u64Base = (uint32_t)Idte.sel << 4;
3643 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3644 pCtx->rip = Idte.off;
3645 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3646 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
3647
3648 /** @todo do we actually do this in real mode? */
3649 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3650 iemRaiseXcptAdjustState(pCtx, u8Vector);
3651
3652 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3653}
3654
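/*
 * Note: the real-mode path above reads the 4-byte IVT entry at vector * 4
 * (offset:segment) and pushes the classic 3-word frame - FLAGS, then CS, then
 * IP - before loading CS:IP from the vector.
 */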
3655
3656/**
 3657 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3658 *
3659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3660 * @param pSReg Pointer to the segment register.
3661 */
3662IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3663{
3664 pSReg->Sel = 0;
3665 pSReg->ValidSel = 0;
3666 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3667 {
 3668 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3669 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3670 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3671 }
3672 else
3673 {
3674 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3675 /** @todo check this on AMD-V */
3676 pSReg->u64Base = 0;
3677 pSReg->u32Limit = 0;
3678 }
3679}
3680
3681
3682/**
3683 * Loads a segment selector during a task switch in V8086 mode.
3684 *
3685 * @param pSReg Pointer to the segment register.
3686 * @param uSel The selector value to load.
3687 */
3688IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3689{
3690 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3691 pSReg->Sel = uSel;
3692 pSReg->ValidSel = uSel;
3693 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3694 pSReg->u64Base = uSel << 4;
3695 pSReg->u32Limit = 0xffff;
3696 pSReg->Attr.u = 0xf3;
3697}
3698
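/*
 * Note: the 0xf3 attribute value above decodes as present, DPL=3, non-system,
 * read/write accessed data segment - the fixed attribute set a segment
 * register has in virtual-8086 mode.
 */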
3699
3700/**
3701 * Loads a NULL data selector into a selector register, both the hidden and
3702 * visible parts, in protected mode.
3703 *
3704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3705 * @param pSReg Pointer to the segment register.
3706 * @param uRpl The RPL.
3707 */
3708IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3709{
 3710 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3711 * data selector in protected mode. */
3712 pSReg->Sel = uRpl;
3713 pSReg->ValidSel = uRpl;
3714 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3715 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3716 {
3717 /* VT-x (Intel 3960x) observed doing something like this. */
3718 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3719 pSReg->u32Limit = UINT32_MAX;
3720 pSReg->u64Base = 0;
3721 }
3722 else
3723 {
3724 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3725 pSReg->u32Limit = 0;
3726 pSReg->u64Base = 0;
3727 }
3728}
3729
3730
3731/**
3732 * Loads a segment selector during a task switch in protected mode.
3733 *
3734 * In this task switch scenario, we would throw \#TS exceptions rather than
3735 * \#GPs.
3736 *
3737 * @returns VBox strict status code.
3738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3739 * @param pSReg Pointer to the segment register.
3740 * @param uSel The new selector value.
3741 *
3742 * @remarks This does _not_ handle CS or SS.
3743 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3744 */
3745IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3746{
3747 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3748
3749 /* Null data selector. */
3750 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3751 {
3752 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3753 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3754 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3755 return VINF_SUCCESS;
3756 }
3757
3758 /* Fetch the descriptor. */
3759 IEMSELDESC Desc;
3760 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3761 if (rcStrict != VINF_SUCCESS)
3762 {
3763 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3764 VBOXSTRICTRC_VAL(rcStrict)));
3765 return rcStrict;
3766 }
3767
3768 /* Must be a data segment or readable code segment. */
3769 if ( !Desc.Legacy.Gen.u1DescType
3770 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3771 {
3772 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3773 Desc.Legacy.Gen.u4Type));
3774 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3775 }
3776
3777 /* Check privileges for data segments and non-conforming code segments. */
3778 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3779 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3780 {
3781 /* The RPL and the new CPL must be less than or equal to the DPL. */
3782 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3783 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3784 {
3785 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3786 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3787 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3788 }
3789 }
3790
3791 /* Is it there? */
3792 if (!Desc.Legacy.Gen.u1Present)
3793 {
3794 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3795 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3796 }
3797
3798 /* The base and limit. */
3799 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3800 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3801
3802 /*
3803 * Ok, everything checked out fine. Now set the accessed bit before
3804 * committing the result into the registers.
3805 */
3806 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3807 {
3808 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3809 if (rcStrict != VINF_SUCCESS)
3810 return rcStrict;
3811 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3812 }
3813
3814 /* Commit */
3815 pSReg->Sel = uSel;
3816 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3817 pSReg->u32Limit = cbLimit;
3818 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3819 pSReg->ValidSel = uSel;
3820 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3821 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3822 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3823
3824 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3825 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3826 return VINF_SUCCESS;
3827}
3828
3829
3830/**
3831 * Performs a task switch.
3832 *
3833 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3834 * caller is responsible for performing the necessary checks (like DPL, TSS
3835 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3836 * reference for JMP, CALL, IRET.
3837 *
 3838 * If the task switch is due to a software interrupt or hardware exception,
3839 * the caller is responsible for validating the TSS selector and descriptor. See
3840 * Intel Instruction reference for INT n.
3841 *
3842 * @returns VBox strict status code.
3843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3844 * @param pCtx The CPU context.
3845 * @param enmTaskSwitch What caused this task switch.
3846 * @param uNextEip The EIP effective after the task switch.
3847 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3848 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3849 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3850 * @param SelTSS The TSS selector of the new task.
3851 * @param pNewDescTSS Pointer to the new TSS descriptor.
3852 */
3853IEM_STATIC VBOXSTRICTRC
3854iemTaskSwitch(PVMCPU pVCpu,
3855 PCPUMCTX pCtx,
3856 IEMTASKSWITCH enmTaskSwitch,
3857 uint32_t uNextEip,
3858 uint32_t fFlags,
3859 uint16_t uErr,
3860 uint64_t uCr2,
3861 RTSEL SelTSS,
3862 PIEMSELDESC pNewDescTSS)
3863{
3864 Assert(!IEM_IS_REAL_MODE(pVCpu));
3865 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3866 IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3867
3868 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3869 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3870 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3871 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3872 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3873
3874 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3875 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3876
3877 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3878 fIsNewTSS386, pCtx->eip, uNextEip));
3879
3880 /* Update CR2 in case it's a page-fault. */
3881 /** @todo This should probably be done much earlier in IEM/PGM. See
3882 * @bugref{5653#c49}. */
3883 if (fFlags & IEM_XCPT_FLAGS_CR2)
3884 pCtx->cr2 = uCr2;
3885
3886 /*
3887 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3888 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3889 */
3890 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3891 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3892 if (uNewTSSLimit < uNewTSSLimitMin)
3893 {
3894 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3895 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3896 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3897 }
3898
3899 /*
3900 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3901 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3902 */
3903 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3904 {
3905 uint32_t const uExitInfo1 = SelTSS;
3906 uint32_t uExitInfo2 = uErr;
3907 switch (enmTaskSwitch)
3908 {
3909 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3910 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3911 default: break;
3912 }
3913 if (fFlags & IEM_XCPT_FLAGS_ERR)
3914 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3915 if (pCtx->eflags.Bits.u1RF)
3916 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3917
3918 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3919 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3920 RT_NOREF2(uExitInfo1, uExitInfo2);
3921 }
3922 /** @todo Nested-VMX task-switch intercept. */
3923
3924 /*
 3925 * Check the current TSS limit. The last write to the current TSS during the
 3926 * task switch is 2 bytes at offset 0x5C (32-bit) or 1 byte at offset 0x28 (16-bit).
3927 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3928 *
 3929 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3930 * end up with smaller than "legal" TSS limits.
3931 */
3932 uint32_t const uCurTSSLimit = pCtx->tr.u32Limit;
3933 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3934 if (uCurTSSLimit < uCurTSSLimitMin)
3935 {
3936 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
3937 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
3938 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3939 }
3940
3941 /*
3942 * Verify that the new TSS can be accessed and map it. Map only the required contents
3943 * and not the entire TSS.
3944 */
3945 void *pvNewTSS;
3946 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
3947 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
3948 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
3949 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
3950 * not perform correct translation if this happens. See Intel spec. 7.2.1
3951 * "Task-State Segment" */
3952 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
3953 if (rcStrict != VINF_SUCCESS)
3954 {
3955 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
3956 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
3957 return rcStrict;
3958 }
3959
3960 /*
3961 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
3962 */
3963 uint32_t u32EFlags = pCtx->eflags.u32;
3964 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
3965 || enmTaskSwitch == IEMTASKSWITCH_IRET)
3966 {
3967 PX86DESC pDescCurTSS;
3968 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
3969 pCtx->gdtr.pGdt + (pCtx->tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
3970 if (rcStrict != VINF_SUCCESS)
3971 {
3972 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3973 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3974 return rcStrict;
3975 }
3976
3977 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
3978 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
3979 if (rcStrict != VINF_SUCCESS)
3980 {
3981 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
3982 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
3983 return rcStrict;
3984 }
3985
3986 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
3987 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
3988 {
3989 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3990 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3991 u32EFlags &= ~X86_EFL_NT;
3992 }
3993 }
3994
3995 /*
3996 * Save the CPU state into the current TSS.
3997 */
3998 RTGCPTR GCPtrCurTSS = pCtx->tr.u64Base;
3999 if (GCPtrNewTSS == GCPtrCurTSS)
4000 {
4001 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4002 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4003 pCtx->cr3, pCtx->eip, pCtx->eflags.u32, pCtx->eax, pCtx->esp, pCtx->ebp, pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ldtr.Sel));
4004 }
4005 if (fIsNewTSS386)
4006 {
4007 /*
4008 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4009 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4010 */
4011 void *pvCurTSS32;
4012 uint32_t offCurTSS = RT_OFFSETOF(X86TSS32, eip);
4013 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS32, selLdt) - RT_OFFSETOF(X86TSS32, eip);
4014 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4015 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4016 if (rcStrict != VINF_SUCCESS)
4017 {
4018 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4019 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4020 return rcStrict;
4021 }
4022
 4023 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4024 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4025 pCurTSS32->eip = uNextEip;
4026 pCurTSS32->eflags = u32EFlags;
4027 pCurTSS32->eax = pCtx->eax;
4028 pCurTSS32->ecx = pCtx->ecx;
4029 pCurTSS32->edx = pCtx->edx;
4030 pCurTSS32->ebx = pCtx->ebx;
4031 pCurTSS32->esp = pCtx->esp;
4032 pCurTSS32->ebp = pCtx->ebp;
4033 pCurTSS32->esi = pCtx->esi;
4034 pCurTSS32->edi = pCtx->edi;
4035 pCurTSS32->es = pCtx->es.Sel;
4036 pCurTSS32->cs = pCtx->cs.Sel;
4037 pCurTSS32->ss = pCtx->ss.Sel;
4038 pCurTSS32->ds = pCtx->ds.Sel;
4039 pCurTSS32->fs = pCtx->fs.Sel;
4040 pCurTSS32->gs = pCtx->gs.Sel;
4041
4042 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4043 if (rcStrict != VINF_SUCCESS)
4044 {
4045 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4046 VBOXSTRICTRC_VAL(rcStrict)));
4047 return rcStrict;
4048 }
4049 }
4050 else
4051 {
4052 /*
4053 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4054 */
4055 void *pvCurTSS16;
4056 uint32_t offCurTSS = RT_OFFSETOF(X86TSS16, ip);
4057 uint32_t cbCurTSS = RT_OFFSETOF(X86TSS16, selLdt) - RT_OFFSETOF(X86TSS16, ip);
4058 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4059 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4060 if (rcStrict != VINF_SUCCESS)
4061 {
4062 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4063 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4064 return rcStrict;
4065 }
4066
 4067 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4068 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4069 pCurTSS16->ip = uNextEip;
4070 pCurTSS16->flags = u32EFlags;
4071 pCurTSS16->ax = pCtx->ax;
4072 pCurTSS16->cx = pCtx->cx;
4073 pCurTSS16->dx = pCtx->dx;
4074 pCurTSS16->bx = pCtx->bx;
4075 pCurTSS16->sp = pCtx->sp;
4076 pCurTSS16->bp = pCtx->bp;
4077 pCurTSS16->si = pCtx->si;
4078 pCurTSS16->di = pCtx->di;
4079 pCurTSS16->es = pCtx->es.Sel;
4080 pCurTSS16->cs = pCtx->cs.Sel;
4081 pCurTSS16->ss = pCtx->ss.Sel;
4082 pCurTSS16->ds = pCtx->ds.Sel;
4083
4084 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4085 if (rcStrict != VINF_SUCCESS)
4086 {
4087 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4088 VBOXSTRICTRC_VAL(rcStrict)));
4089 return rcStrict;
4090 }
4091 }
4092
4093 /*
4094 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4095 */
4096 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4097 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4098 {
4099 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4100 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4101 pNewTSS->selPrev = pCtx->tr.Sel;
4102 }
4103
4104 /*
4105 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4106 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4107 */
4108 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4109 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4110 bool fNewDebugTrap;
4111 if (fIsNewTSS386)
4112 {
4113 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4114 uNewCr3 = (pCtx->cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4115 uNewEip = pNewTSS32->eip;
4116 uNewEflags = pNewTSS32->eflags;
4117 uNewEax = pNewTSS32->eax;
4118 uNewEcx = pNewTSS32->ecx;
4119 uNewEdx = pNewTSS32->edx;
4120 uNewEbx = pNewTSS32->ebx;
4121 uNewEsp = pNewTSS32->esp;
4122 uNewEbp = pNewTSS32->ebp;
4123 uNewEsi = pNewTSS32->esi;
4124 uNewEdi = pNewTSS32->edi;
4125 uNewES = pNewTSS32->es;
4126 uNewCS = pNewTSS32->cs;
4127 uNewSS = pNewTSS32->ss;
4128 uNewDS = pNewTSS32->ds;
4129 uNewFS = pNewTSS32->fs;
4130 uNewGS = pNewTSS32->gs;
4131 uNewLdt = pNewTSS32->selLdt;
4132 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4133 }
4134 else
4135 {
4136 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4137 uNewCr3 = 0;
4138 uNewEip = pNewTSS16->ip;
4139 uNewEflags = pNewTSS16->flags;
4140 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4141 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4142 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4143 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4144 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4145 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4146 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4147 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4148 uNewES = pNewTSS16->es;
4149 uNewCS = pNewTSS16->cs;
4150 uNewSS = pNewTSS16->ss;
4151 uNewDS = pNewTSS16->ds;
4152 uNewFS = 0;
4153 uNewGS = 0;
4154 uNewLdt = pNewTSS16->selLdt;
4155 fNewDebugTrap = false;
4156 }
4157
4158 if (GCPtrNewTSS == GCPtrCurTSS)
4159 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4160 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4161
4162 /*
4163 * We're done accessing the new TSS.
4164 */
4165 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4166 if (rcStrict != VINF_SUCCESS)
4167 {
4168 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4169 return rcStrict;
4170 }
4171
4172 /*
4173 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4174 */
4175 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4176 {
4177 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4178 pCtx->gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4179 if (rcStrict != VINF_SUCCESS)
4180 {
4181 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4182 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4183 return rcStrict;
4184 }
4185
4186 /* Check that the descriptor indicates the new TSS is available (not busy). */
4187 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4188 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4189 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4190
4191 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4192 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4193 if (rcStrict != VINF_SUCCESS)
4194 {
4195 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4196 enmTaskSwitch, pCtx->gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4197 return rcStrict;
4198 }
4199 }
4200
4201 /*
 4202 * From this point on, we're technically in the new task. Exceptions raised from here
 4203 * on are taken after the task switch completes but before any instruction in the new task executes.
4204 */
4205 pCtx->tr.Sel = SelTSS;
4206 pCtx->tr.ValidSel = SelTSS;
4207 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID;
4208 pCtx->tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4209 pCtx->tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4210 pCtx->tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4211 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4212
4213 /* Set the busy bit in TR. */
4214 pCtx->tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4215 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4216 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4217 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4218 {
4219 uNewEflags |= X86_EFL_NT;
4220 }
4221
4222 pCtx->dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4223 pCtx->cr0 |= X86_CR0_TS;
4224 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4225
4226 pCtx->eip = uNewEip;
4227 pCtx->eax = uNewEax;
4228 pCtx->ecx = uNewEcx;
4229 pCtx->edx = uNewEdx;
4230 pCtx->ebx = uNewEbx;
4231 pCtx->esp = uNewEsp;
4232 pCtx->ebp = uNewEbp;
4233 pCtx->esi = uNewEsi;
4234 pCtx->edi = uNewEdi;
4235
4236 uNewEflags &= X86_EFL_LIVE_MASK;
4237 uNewEflags |= X86_EFL_RA1_MASK;
4238 IEMMISC_SET_EFL(pVCpu, pCtx, uNewEflags);
4239
4240 /*
4241 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4242 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4243 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4244 */
4245 pCtx->es.Sel = uNewES;
4246 pCtx->es.Attr.u &= ~X86DESCATTR_P;
4247
4248 pCtx->cs.Sel = uNewCS;
4249 pCtx->cs.Attr.u &= ~X86DESCATTR_P;
4250
4251 pCtx->ss.Sel = uNewSS;
4252 pCtx->ss.Attr.u &= ~X86DESCATTR_P;
4253
4254 pCtx->ds.Sel = uNewDS;
4255 pCtx->ds.Attr.u &= ~X86DESCATTR_P;
4256
4257 pCtx->fs.Sel = uNewFS;
4258 pCtx->fs.Attr.u &= ~X86DESCATTR_P;
4259
4260 pCtx->gs.Sel = uNewGS;
4261 pCtx->gs.Attr.u &= ~X86DESCATTR_P;
4262 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4263
4264 pCtx->ldtr.Sel = uNewLdt;
4265 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4266 pCtx->ldtr.Attr.u &= ~X86DESCATTR_P;
4267 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4268
4269 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4270 {
4271 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE;
4272 pCtx->cs.Attr.u |= X86DESCATTR_UNUSABLE;
4273 pCtx->ss.Attr.u |= X86DESCATTR_UNUSABLE;
4274 pCtx->ds.Attr.u |= X86DESCATTR_UNUSABLE;
4275 pCtx->fs.Attr.u |= X86DESCATTR_UNUSABLE;
4276 pCtx->gs.Attr.u |= X86DESCATTR_UNUSABLE;
4277 pCtx->ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4278 }
4279
4280 /*
4281 * Switch CR3 for the new task.
4282 */
4283 if ( fIsNewTSS386
4284 && (pCtx->cr0 & X86_CR0_PG))
4285 {
4286 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4287 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4288 AssertRCSuccessReturn(rc, rc);
4289
4290 /* Inform PGM. */
4291 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE));
4292 AssertRCReturn(rc, rc);
4293 /* ignore informational status codes */
4294
4295 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4296 }
4297
4298 /*
4299 * Switch LDTR for the new task.
4300 */
4301 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4302 iemHlpLoadNullDataSelectorProt(pVCpu, &pCtx->ldtr, uNewLdt);
4303 else
4304 {
4305 Assert(!pCtx->ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4306
4307 IEMSELDESC DescNewLdt;
4308 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4309 if (rcStrict != VINF_SUCCESS)
4310 {
4311 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4312 uNewLdt, pCtx->gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4313 return rcStrict;
4314 }
4315 if ( !DescNewLdt.Legacy.Gen.u1Present
4316 || DescNewLdt.Legacy.Gen.u1DescType
4317 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4318 {
4319 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4320 uNewLdt, DescNewLdt.Legacy.u));
4321 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4322 }
4323
4324 pCtx->ldtr.ValidSel = uNewLdt;
4325 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4326 pCtx->ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4327 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4328 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4329 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4330 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4331 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr));
4332 }
4333
4334 IEMSELDESC DescSS;
4335 if (IEM_IS_V86_MODE(pVCpu))
4336 {
4337 pVCpu->iem.s.uCpl = 3;
4338 iemHlpLoadSelectorInV86Mode(&pCtx->es, uNewES);
4339 iemHlpLoadSelectorInV86Mode(&pCtx->cs, uNewCS);
4340 iemHlpLoadSelectorInV86Mode(&pCtx->ss, uNewSS);
4341 iemHlpLoadSelectorInV86Mode(&pCtx->ds, uNewDS);
4342 iemHlpLoadSelectorInV86Mode(&pCtx->fs, uNewFS);
4343 iemHlpLoadSelectorInV86Mode(&pCtx->gs, uNewGS);
4344
4345 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4346 DescSS.Legacy.u = 0;
4347 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pCtx->ss.u32Limit;
4348 DescSS.Legacy.Gen.u4LimitHigh = pCtx->ss.u32Limit >> 16;
4349 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pCtx->ss.u64Base;
4350 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pCtx->ss.u64Base >> 16);
4351 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pCtx->ss.u64Base >> 24);
4352 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4353 DescSS.Legacy.Gen.u2Dpl = 3;
4354 }
4355 else
4356 {
4357 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4358
4359 /*
4360 * Load the stack segment for the new task.
4361 */
4362 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4363 {
4364 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4365 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4366 }
4367
4368 /* Fetch the descriptor. */
4369 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4370 if (rcStrict != VINF_SUCCESS)
4371 {
4372 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4373 VBOXSTRICTRC_VAL(rcStrict)));
4374 return rcStrict;
4375 }
4376
4377 /* SS must be a data segment and writable. */
4378 if ( !DescSS.Legacy.Gen.u1DescType
4379 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4380 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4381 {
4382 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4383 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4384 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4385 }
4386
4387 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4388 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4389 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4390 {
4391 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4392 uNewCpl));
4393 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4394 }
4395
4396 /* Is it there? */
4397 if (!DescSS.Legacy.Gen.u1Present)
4398 {
4399 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4400 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4401 }
4402
4403 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4404 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4405
4406 /* Set the accessed bit before committing the result into SS. */
4407 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4408 {
4409 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4410 if (rcStrict != VINF_SUCCESS)
4411 return rcStrict;
4412 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4413 }
4414
4415 /* Commit SS. */
4416 pCtx->ss.Sel = uNewSS;
4417 pCtx->ss.ValidSel = uNewSS;
4418 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4419 pCtx->ss.u32Limit = cbLimit;
4420 pCtx->ss.u64Base = u64Base;
4421 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4422 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss));
4423
4424 /* CPL has changed, update IEM before loading rest of segments. */
4425 pVCpu->iem.s.uCpl = uNewCpl;
4426
4427 /*
4428 * Load the data segments for the new task.
4429 */
4430 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->es, uNewES);
4431 if (rcStrict != VINF_SUCCESS)
4432 return rcStrict;
4433 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->ds, uNewDS);
4434 if (rcStrict != VINF_SUCCESS)
4435 return rcStrict;
4436 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->fs, uNewFS);
4437 if (rcStrict != VINF_SUCCESS)
4438 return rcStrict;
4439 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pCtx->gs, uNewGS);
4440 if (rcStrict != VINF_SUCCESS)
4441 return rcStrict;
4442
4443 /*
4444 * Load the code segment for the new task.
4445 */
4446 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4447 {
4448 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4449 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4450 }
4451
4452 /* Fetch the descriptor. */
4453 IEMSELDESC DescCS;
4454 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4455 if (rcStrict != VINF_SUCCESS)
4456 {
4457 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4458 return rcStrict;
4459 }
4460
4461 /* CS must be a code segment. */
4462 if ( !DescCS.Legacy.Gen.u1DescType
4463 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4464 {
4465 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4466 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4467 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4468 }
4469
4470 /* For conforming CS, DPL must be less than or equal to the RPL. */
4471 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4472 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4473 {
4474 Log(("iemTaskSwitch: confirming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4475 DescCS.Legacy.Gen.u2Dpl));
4476 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4477 }
4478
4479 /* For non-conforming CS, DPL must match RPL. */
4480 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4481 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4482 {
4483 Log(("iemTaskSwitch: non-confirming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4484 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4485 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4486 }
4487
4488 /* Is it there? */
4489 if (!DescCS.Legacy.Gen.u1Present)
4490 {
4491 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4492 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4493 }
4494
4495 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4496 u64Base = X86DESC_BASE(&DescCS.Legacy);
4497
4498 /* Set the accessed bit before committing the result into CS. */
4499 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4500 {
4501 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4502 if (rcStrict != VINF_SUCCESS)
4503 return rcStrict;
4504 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4505 }
4506
4507 /* Commit CS. */
4508 pCtx->cs.Sel = uNewCS;
4509 pCtx->cs.ValidSel = uNewCS;
4510 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4511 pCtx->cs.u32Limit = cbLimit;
4512 pCtx->cs.u64Base = u64Base;
4513 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
4514 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs));
4515 }
4516
4517 /** @todo Debug trap. */
4518 if (fIsNewTSS386 && fNewDebugTrap)
4519 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4520
4521 /*
4522 * Construct the error code masks based on what caused this task switch.
4523 * See Intel Instruction reference for INT.
4524 */
4525 uint16_t uExt;
4526 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4527 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4528 {
4529 uExt = 1;
4530 }
4531 else
4532 uExt = 0;
4533
4534 /*
4535 * Push any error code on to the new stack.
4536 */
4537 if (fFlags & IEM_XCPT_FLAGS_ERR)
4538 {
4539 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4540 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4541 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4542
4543 /* Check that there is sufficient space on the stack. */
4544 /** @todo Factor out segment limit checking for normal/expand down segments
4545 * into a separate function. */
4546 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4547 {
4548 if ( pCtx->esp - 1 > cbLimitSS
4549 || pCtx->esp < cbStackFrame)
4550 {
4551 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4552 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4553 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4554 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4555 }
4556 }
4557 else
4558 {
4559 if ( pCtx->esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4560 || pCtx->esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4561 {
4562 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4563 pCtx->ss.Sel, pCtx->esp, cbStackFrame));
4564 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4565 }
4566 }
4567
4568
4569 if (fIsNewTSS386)
4570 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4571 else
4572 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4573 if (rcStrict != VINF_SUCCESS)
4574 {
4575 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4576 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4577 return rcStrict;
4578 }
4579 }
4580
4581 /* Check the new EIP against the new CS limit. */
4582 if (pCtx->eip > pCtx->cs.u32Limit)
4583 {
4584 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4585 pCtx->eip, pCtx->cs.u32Limit));
4586 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4587 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4588 }
4589
4590 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pCtx->cs.Sel, pCtx->eip, pCtx->ss.Sel));
4591 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4592}
4593
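/*
 * Note: the overall iemTaskSwitch sequence above is: validate the new TSS
 * limit, honour the SVM task-switch intercept, validate the current TSS
 * limit, save the dynamic fields into the outgoing TSS (clearing its busy bit
 * for JMP/IRET and EFLAGS.NT for IRET), write the previous-task link for
 * CALL/INT_XCPT, read the incoming TSS into temporaries, set the busy bit in
 * the new TSS descriptor for non-IRET switches, commit TR, CR0.TS, EFLAGS and
 * the GPRs, reload CR3 and LDTR, then the stack, data and code segments, push
 * any error code onto the new stack and finally check EIP against the new CS
 * limit.
 */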
4594
4595/**
4596 * Implements exceptions and interrupts for protected mode.
4597 *
4598 * @returns VBox strict status code.
4599 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4600 * @param pCtx The CPU context.
4601 * @param cbInstr The number of bytes to offset rIP by in the return
4602 * address.
4603 * @param u8Vector The interrupt / exception vector number.
4604 * @param fFlags The flags.
4605 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4606 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4607 */
4608IEM_STATIC VBOXSTRICTRC
4609iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4610 PCPUMCTX pCtx,
4611 uint8_t cbInstr,
4612 uint8_t u8Vector,
4613 uint32_t fFlags,
4614 uint16_t uErr,
4615 uint64_t uCr2)
4616{
4617 IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4618
4619 /*
4620 * Read the IDT entry.
4621 */
4622 if (pCtx->idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4623 {
4624 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
4625 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4626 }
4627 X86DESC Idte;
4628 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4629 pCtx->idtr.pIdt + UINT32_C(8) * u8Vector);
4630 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4631 {
4632 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4633 return rcStrict;
4634 }
4635 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4636 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4637 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4638
4639 /*
4640 * Check the descriptor type, DPL and such.
4641 * ASSUMES this is done in the same order as described for call-gate calls.
4642 */
4643 if (Idte.Gate.u1DescType)
4644 {
4645 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4646 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4647 }
4648 bool fTaskGate = false;
4649 uint8_t f32BitGate = true;
4650 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4651 switch (Idte.Gate.u4Type)
4652 {
4653 case X86_SEL_TYPE_SYS_UNDEFINED:
4654 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4655 case X86_SEL_TYPE_SYS_LDT:
4656 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4657 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4658 case X86_SEL_TYPE_SYS_UNDEFINED2:
4659 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4660 case X86_SEL_TYPE_SYS_UNDEFINED3:
4661 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4662 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4663 case X86_SEL_TYPE_SYS_UNDEFINED4:
4664 {
4665 /** @todo check what actually happens when the type is wrong...
4666 * esp. call gates. */
4667 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4668 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4669 }
4670
4671 case X86_SEL_TYPE_SYS_286_INT_GATE:
4672 f32BitGate = false;
4673 RT_FALL_THRU();
4674 case X86_SEL_TYPE_SYS_386_INT_GATE:
4675 fEflToClear |= X86_EFL_IF;
4676 break;
4677
4678 case X86_SEL_TYPE_SYS_TASK_GATE:
4679 fTaskGate = true;
4680#ifndef IEM_IMPLEMENTS_TASKSWITCH
4681 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4682#endif
4683 break;
4684
4685 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4686 f32BitGate = false;
             RT_FALL_THRU();
4687 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4688 break;
4689
4690 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4691 }
4692
4693 /* Check DPL against CPL if applicable. */
4694 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4695 {
4696 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4697 {
4698 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4699 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4700 }
4701 }
4702
4703 /* Is it there? */
4704 if (!Idte.Gate.u1Present)
4705 {
4706 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4707 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4708 }
4709
4710 /* Is it a task-gate? */
4711 if (fTaskGate)
4712 {
4713 /*
4714 * Construct the error code masks based on what caused this task switch.
4715 * See Intel Instruction reference for INT.
4716 */
4717 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4718 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4719 RTSEL SelTSS = Idte.Gate.u16Sel;
4720
4721 /*
4722 * Fetch the TSS descriptor in the GDT.
4723 */
4724 IEMSELDESC DescTSS;
4725 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4726 if (rcStrict != VINF_SUCCESS)
4727 {
4728 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4729 VBOXSTRICTRC_VAL(rcStrict)));
4730 return rcStrict;
4731 }
4732
4733 /* The TSS descriptor must be a system segment and be available (not busy). */
4734 if ( DescTSS.Legacy.Gen.u1DescType
4735 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4736 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4737 {
4738 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4739 u8Vector, SelTSS, DescTSS.Legacy.au64));
4740 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4741 }
4742
4743 /* The TSS must be present. */
4744 if (!DescTSS.Legacy.Gen.u1Present)
4745 {
4746 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4747 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4748 }
4749
4750 /* Do the actual task switch. */
4751 return iemTaskSwitch(pVCpu, pCtx, IEMTASKSWITCH_INT_XCPT, pCtx->eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4752 }
4753
4754 /* A null CS is bad. */
4755 RTSEL NewCS = Idte.Gate.u16Sel;
4756 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4757 {
4758 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4759 return iemRaiseGeneralProtectionFault0(pVCpu);
4760 }
4761
4762 /* Fetch the descriptor for the new CS. */
4763 IEMSELDESC DescCS;
4764 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4765 if (rcStrict != VINF_SUCCESS)
4766 {
4767 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4768 return rcStrict;
4769 }
4770
4771 /* Must be a code segment. */
4772 if (!DescCS.Legacy.Gen.u1DescType)
4773 {
4774 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4775 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4776 }
4777 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4778 {
4779 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4780 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4781 }
4782
4783 /* Don't allow lowering the privilege level. */
4784 /** @todo Does the lowering of privileges apply to software interrupts
4785 * only? This has bearings on the more-privileged or
4786 * same-privilege stack behavior further down. A testcase would
4787 * be nice. */
4788 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4789 {
4790 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4791 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4792 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4793 }
4794
4795 /* Make sure the selector is present. */
4796 if (!DescCS.Legacy.Gen.u1Present)
4797 {
4798 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4799 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4800 }
4801
4802 /* Check the new EIP against the new CS limit. */
4803 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4804 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4805 ? Idte.Gate.u16OffsetLow
4806 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
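    /* (286-style gates only carry a 16-bit offset, so the high offset word is
        ignored for them; 386 gates supply a full 32-bit EIP.) */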
4807 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4808 if (uNewEip > cbLimitCS)
4809 {
4810 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4811 u8Vector, uNewEip, cbLimitCS, NewCS));
4812 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4813 }
4814 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4815
4816 /* Calc the flag image to push. */
4817 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
4818 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4819 fEfl &= ~X86_EFL_RF;
4820 else
4821 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4822
4823 /* From V8086 mode only go to CPL 0. */
4824 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4825 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
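    /* (A conforming code segment keeps the current CPL; a non-conforming one switches
        to the gate CS DPL, which the DPL > CPL check above ensures is <= CPL.) */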
4826 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4827 {
4828 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4829 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4830 }
4831
4832 /*
4833 * If the privilege level changes, we need to get a new stack from the TSS.
4834 * This in turns means validating the new SS and ESP...
4835 */
4836 if (uNewCpl != pVCpu->iem.s.uCpl)
4837 {
4838 RTSEL NewSS;
4839 uint32_t uNewEsp;
4840 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, pCtx, uNewCpl, &NewSS, &uNewEsp);
4841 if (rcStrict != VINF_SUCCESS)
4842 return rcStrict;
4843
4844 IEMSELDESC DescSS;
4845 rcStrict = iemMiscValidateNewSS(pVCpu, pCtx, NewSS, uNewCpl, &DescSS);
4846 if (rcStrict != VINF_SUCCESS)
4847 return rcStrict;
4848 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4849 if (!DescSS.Legacy.Gen.u1DefBig)
4850 {
4851 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4852 uNewEsp = (uint16_t)uNewEsp;
4853 }
4854
4855 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pCtx->ss.Sel, pCtx->esp));
4856
4857 /* Check that there is sufficient space for the stack frame. */
4858 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4859 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4860 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4861 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
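        /* (Frame layout: SS, ESP, EFLAGS, CS, EIP plus an optional error code when
            coming from protected mode; interrupting V86 code additionally pushes GS,
            FS, DS and ES.  The f32BitGate shift doubles the size for 32-bit gates.) */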
4862
4863 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4864 {
4865 if ( uNewEsp - 1 > cbLimitSS
4866 || uNewEsp < cbStackFrame)
4867 {
4868 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4869 u8Vector, NewSS, uNewEsp, cbStackFrame));
4870 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4871 }
4872 }
4873 else
4874 {
4875 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4876 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4877 {
4878 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4879 u8Vector, NewSS, uNewEsp, cbStackFrame));
4880 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4881 }
4882 }
4883
4884 /*
4885 * Start making changes.
4886 */
4887
4888 /* Set the new CPL so that stack accesses use it. */
4889 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4890 pVCpu->iem.s.uCpl = uNewCpl;
4891
4892 /* Create the stack frame. */
4893 RTPTRUNION uStackFrame;
4894 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4895 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4896 if (rcStrict != VINF_SUCCESS)
4897 return rcStrict;
4898 void * const pvStackFrame = uStackFrame.pv;
4899 if (f32BitGate)
4900 {
4901 if (fFlags & IEM_XCPT_FLAGS_ERR)
4902 *uStackFrame.pu32++ = uErr;
4903 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->eip + cbInstr : pCtx->eip;
4904 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4905 uStackFrame.pu32[2] = fEfl;
4906 uStackFrame.pu32[3] = pCtx->esp;
4907 uStackFrame.pu32[4] = pCtx->ss.Sel;
4908 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pCtx->ss.Sel, pCtx->esp));
4909 if (fEfl & X86_EFL_VM)
4910 {
4911 uStackFrame.pu32[1] = pCtx->cs.Sel;
4912 uStackFrame.pu32[5] = pCtx->es.Sel;
4913 uStackFrame.pu32[6] = pCtx->ds.Sel;
4914 uStackFrame.pu32[7] = pCtx->fs.Sel;
4915 uStackFrame.pu32[8] = pCtx->gs.Sel;
4916 }
4917 }
4918 else
4919 {
4920 if (fFlags & IEM_XCPT_FLAGS_ERR)
4921 *uStackFrame.pu16++ = uErr;
4922 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pCtx->ip + cbInstr : pCtx->ip;
4923 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4924 uStackFrame.pu16[2] = fEfl;
4925 uStackFrame.pu16[3] = pCtx->sp;
4926 uStackFrame.pu16[4] = pCtx->ss.Sel;
4927 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pCtx->ss.Sel, pCtx->sp));
4928 if (fEfl & X86_EFL_VM)
4929 {
4930 uStackFrame.pu16[1] = pCtx->cs.Sel;
4931 uStackFrame.pu16[5] = pCtx->es.Sel;
4932 uStackFrame.pu16[6] = pCtx->ds.Sel;
4933 uStackFrame.pu16[7] = pCtx->fs.Sel;
4934 uStackFrame.pu16[8] = pCtx->gs.Sel;
4935 }
4936 }
4937 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
4938 if (rcStrict != VINF_SUCCESS)
4939 return rcStrict;
4940
4941 /* Mark the selectors 'accessed' (hope this is the correct time). */
4942 /** @todo testcase: exactly _when_ are the accessed bits set - before or
4943 * after pushing the stack frame? (Write protect the gdt + stack to
4944 * find out.) */
4945 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4946 {
4947 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
4948 if (rcStrict != VINF_SUCCESS)
4949 return rcStrict;
4950 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4951 }
4952
4953 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4954 {
4955 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
4956 if (rcStrict != VINF_SUCCESS)
4957 return rcStrict;
4958 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4959 }
4960
4961 /*
4962 * Start committing the register changes (joins with the DPL=CPL branch).
4963 */
4964 pCtx->ss.Sel = NewSS;
4965 pCtx->ss.ValidSel = NewSS;
4966 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
4967 pCtx->ss.u32Limit = cbLimitSS;
4968 pCtx->ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
4969 pCtx->ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4970 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
4971 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
4972 * SP is loaded).
4973 * Need to check the other combinations too:
4974 * - 16-bit TSS, 32-bit handler
4975 * - 32-bit TSS, 16-bit handler */
4976 if (!pCtx->ss.Attr.n.u1DefBig)
4977 pCtx->sp = (uint16_t)(uNewEsp - cbStackFrame);
4978 else
4979 pCtx->rsp = uNewEsp - cbStackFrame;
4980
4981 if (fEfl & X86_EFL_VM)
4982 {
4983 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->gs);
4984 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->fs);
4985 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->es);
4986 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pCtx->ds);
4987 }
4988 }
4989 /*
4990 * Same privilege, no stack change and smaller stack frame.
4991 */
4992 else
4993 {
4994 uint64_t uNewRsp;
4995 RTPTRUNION uStackFrame;
4996 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
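        /* (Only EFLAGS, CS, EIP and an optional error code are pushed here, i.e.
            6/8 bytes thru a 16-bit gate and 12/16 bytes thru a 32-bit one.) */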
4997 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
4998 if (rcStrict != VINF_SUCCESS)
4999 return rcStrict;
5000 void * const pvStackFrame = uStackFrame.pv;
5001
5002 if (f32BitGate)
5003 {
5004 if (fFlags & IEM_XCPT_FLAGS_ERR)
5005 *uStackFrame.pu32++ = uErr;
5006 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5007 uStackFrame.pu32[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5008 uStackFrame.pu32[2] = fEfl;
5009 }
5010 else
5011 {
5012 if (fFlags & IEM_XCPT_FLAGS_ERR)
5013 *uStackFrame.pu16++ = uErr;
5014 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->eip + cbInstr : pCtx->eip;
5015 uStackFrame.pu16[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5016 uStackFrame.pu16[2] = fEfl;
5017 }
5018 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5019 if (rcStrict != VINF_SUCCESS)
5020 return rcStrict;
5021
5022 /* Mark the CS selector as 'accessed'. */
5023 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5024 {
5025 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5026 if (rcStrict != VINF_SUCCESS)
5027 return rcStrict;
5028 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5029 }
5030
5031 /*
5032 * Start committing the register changes (joins with the other branch).
5033 */
5034 pCtx->rsp = uNewRsp;
5035 }
5036
5037 /* ... register committing continues. */
5038 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5039 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5040 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5041 pCtx->cs.u32Limit = cbLimitCS;
5042 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5043 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5044
5045 pCtx->rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5046 fEfl &= ~fEflToClear;
5047 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5048
5049 if (fFlags & IEM_XCPT_FLAGS_CR2)
5050 pCtx->cr2 = uCr2;
5051
5052 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5053 iemRaiseXcptAdjustState(pCtx, u8Vector);
5054
5055 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5056}
5057
5058
5059/**
5060 * Implements exceptions and interrupts for long mode.
5061 *
5062 * @returns VBox strict status code.
5063 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5064 * @param pCtx The CPU context.
5065 * @param cbInstr The number of bytes to offset rIP by in the return
5066 * address.
5067 * @param u8Vector The interrupt / exception vector number.
5068 * @param fFlags The flags.
5069 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5070 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5071 */
5072IEM_STATIC VBOXSTRICTRC
5073iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5074 PCPUMCTX pCtx,
5075 uint8_t cbInstr,
5076 uint8_t u8Vector,
5077 uint32_t fFlags,
5078 uint16_t uErr,
5079 uint64_t uCr2)
5080{
5081 IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5082
5083 /*
5084 * Read the IDT entry.
5085 */
5086 uint16_t offIdt = (uint16_t)u8Vector << 4;
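    /* Long mode IDT entries are 16-byte gate descriptors, hence the shift by 4;
       the descriptor is fetched below as two 8-byte halves. */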
5087 if (pCtx->idtr.cbIdt < offIdt + 7)
5088 {
5089 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pCtx->idtr.cbIdt));
5090 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5091 }
5092 X86DESC64 Idte;
5093 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pCtx->idtr.pIdt + offIdt);
5094 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5095 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pCtx->idtr.pIdt + offIdt + 8);
5096 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5097 {
5098 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5099 return rcStrict;
5100 }
5101 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5102 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5103 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5104
5105 /*
5106 * Check the descriptor type, DPL and such.
5107 * ASSUMES this is done in the same order as described for call-gate calls.
5108 */
5109 if (Idte.Gate.u1DescType)
5110 {
5111 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5112 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5113 }
5114 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5115 switch (Idte.Gate.u4Type)
5116 {
5117 case AMD64_SEL_TYPE_SYS_INT_GATE:
5118 fEflToClear |= X86_EFL_IF;
5119 break;
5120 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5121 break;
5122
5123 default:
5124 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5125 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5126 }
5127
5128 /* Check DPL against CPL if applicable. */
5129 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5130 {
5131 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5132 {
5133 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5134 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5135 }
5136 }
5137
5138 /* Is it there? */
5139 if (!Idte.Gate.u1Present)
5140 {
5141 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5142 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5143 }
5144
5145 /* A null CS is bad. */
5146 RTSEL NewCS = Idte.Gate.u16Sel;
5147 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5148 {
5149 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5150 return iemRaiseGeneralProtectionFault0(pVCpu);
5151 }
5152
5153 /* Fetch the descriptor for the new CS. */
5154 IEMSELDESC DescCS;
5155 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5156 if (rcStrict != VINF_SUCCESS)
5157 {
5158 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5159 return rcStrict;
5160 }
5161
5162 /* Must be a 64-bit code segment. */
5163 if (!DescCS.Long.Gen.u1DescType)
5164 {
5165 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5166 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5167 }
5168 if ( !DescCS.Long.Gen.u1Long
5169 || DescCS.Long.Gen.u1DefBig
5170 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5171 {
5172 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5173 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5174 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5175 }
5176
5177 /* Don't allow lowering the privilege level. For non-conforming CS
5178 selectors, the CS.DPL sets the privilege level the trap/interrupt
5179 handler runs at. For conforming CS selectors, the CPL remains
5180 unchanged, but the CS.DPL must be <= CPL. */
5181 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5182 * when CPU in Ring-0. Result \#GP? */
5183 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5184 {
5185 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5186 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5187 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5188 }
5189
5190
5191 /* Make sure the selector is present. */
5192 if (!DescCS.Legacy.Gen.u1Present)
5193 {
5194 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5195 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5196 }
5197
5198 /* Check that the new RIP is canonical. */
5199 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5200 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5201 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5202 if (!IEM_IS_CANONICAL(uNewRip))
5203 {
5204 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5205 return iemRaiseGeneralProtectionFault0(pVCpu);
5206 }
5207
5208 /*
5209 * If the privilege level changes or if the IST isn't zero, we need to get
5210 * a new stack from the TSS.
5211 */
5212 uint64_t uNewRsp;
5213 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5214 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5215 if ( uNewCpl != pVCpu->iem.s.uCpl
5216 || Idte.Gate.u3IST != 0)
5217 {
5218 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, pCtx, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5219 if (rcStrict != VINF_SUCCESS)
5220 return rcStrict;
5221 }
5222 else
5223 uNewRsp = pCtx->rsp;
5224 uNewRsp &= ~(uint64_t)0xf;
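    /* In long mode the CPU aligns the stack to a 16-byte boundary before pushing
       the interrupt stack frame. */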
5225
5226 /*
5227 * Calc the flag image to push.
5228 */
5229 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu, pCtx);
5230 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5231 fEfl &= ~X86_EFL_RF;
5232 else
5233 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5234
5235 /*
5236 * Start making changes.
5237 */
5238 /* Set the new CPL so that stack accesses use it. */
5239 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5240 pVCpu->iem.s.uCpl = uNewCpl;
5241
5242 /* Create the stack frame. */
5243 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
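    /* (SS, RSP, RFLAGS, CS and RIP are always pushed in long mode - 5 quadwords -
        plus one more quadword for the error code when present.) */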
5244 RTPTRUNION uStackFrame;
5245 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5246 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5247 if (rcStrict != VINF_SUCCESS)
5248 return rcStrict;
5249 void * const pvStackFrame = uStackFrame.pv;
5250
5251 if (fFlags & IEM_XCPT_FLAGS_ERR)
5252 *uStackFrame.pu64++ = uErr;
5253 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pCtx->rip + cbInstr : pCtx->rip;
5254 uStackFrame.pu64[1] = (pCtx->cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5255 uStackFrame.pu64[2] = fEfl;
5256 uStackFrame.pu64[3] = pCtx->rsp;
5257 uStackFrame.pu64[4] = pCtx->ss.Sel;
5258 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5259 if (rcStrict != VINF_SUCCESS)
5260 return rcStrict;
5261
5262 /* Mark the CS selector 'accessed' (hope this is the correct time). */
5263 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5264 * after pushing the stack frame? (Write protect the gdt + stack to
5265 * find out.) */
5266 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5267 {
5268 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5269 if (rcStrict != VINF_SUCCESS)
5270 return rcStrict;
5271 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5272 }
5273
5274 /*
5275 * Start committing the register changes.
5276 */
5277 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5278 * hidden registers when interrupting 32-bit or 16-bit code! */
5279 if (uNewCpl != uOldCpl)
5280 {
5281 pCtx->ss.Sel = 0 | uNewCpl;
5282 pCtx->ss.ValidSel = 0 | uNewCpl;
5283 pCtx->ss.fFlags = CPUMSELREG_FLAGS_VALID;
5284 pCtx->ss.u32Limit = UINT32_MAX;
5285 pCtx->ss.u64Base = 0;
5286 pCtx->ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5287 }
5288 pCtx->rsp = uNewRsp - cbStackFrame;
5289 pCtx->cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5290 pCtx->cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5291 pCtx->cs.fFlags = CPUMSELREG_FLAGS_VALID;
5292 pCtx->cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5293 pCtx->cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5294 pCtx->cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5295 pCtx->rip = uNewRip;
5296
5297 fEfl &= ~fEflToClear;
5298 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl);
5299
5300 if (fFlags & IEM_XCPT_FLAGS_CR2)
5301 pCtx->cr2 = uCr2;
5302
5303 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5304 iemRaiseXcptAdjustState(pCtx, u8Vector);
5305
5306 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5307}
5308
5309
5310/**
5311 * Implements exceptions and interrupts.
5312 *
5313 * All exceptions and interrupts go thru this function!
5314 *
5315 * @returns VBox strict status code.
5316 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5317 * @param cbInstr The number of bytes to offset rIP by in the return
5318 * address.
5319 * @param u8Vector The interrupt / exception vector number.
5320 * @param fFlags The flags.
5321 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5322 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5323 */
5324DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5325iemRaiseXcptOrInt(PVMCPU pVCpu,
5326 uint8_t cbInstr,
5327 uint8_t u8Vector,
5328 uint32_t fFlags,
5329 uint16_t uErr,
5330 uint64_t uCr2)
5331{
5332 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
5333
5334 /*
5335 * Get all the state that we might need here.
5336 */
5337#ifdef IN_RING0
5338 int rc = HMR0EnsureCompleteBasicContext(pVCpu, pCtx);
5339 AssertRCReturn(rc, rc);
5340#endif
5341 IEM_CTX_IMPORT_RET(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5342 IEM_CTX_ASSERT(pCtx, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5343
5344#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5345 /*
5346 * Flush prefetch buffer
5347 */
5348 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5349#endif
5350
5351 /*
5352 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5353 */
5354 if ( pCtx->eflags.Bits.u1VM
5355 && pCtx->eflags.Bits.u2IOPL != 3
5356 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5357 && (pCtx->cr0 & X86_CR0_PE) )
5358 {
5359 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5360 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5361 u8Vector = X86_XCPT_GP;
5362 uErr = 0;
5363 }
5364#ifdef DBGFTRACE_ENABLED
5365 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5366 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5367 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp);
5368#endif
5369
5370#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5371 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
5372 {
5373 /*
5374 * If the event is being injected as part of VMRUN, it isn't subject to event
5375 * intercepts in the nested-guest. However, secondary exceptions that occur
5376 * during injection of any event -are- subject to exception intercepts.
5377 * See AMD spec. 15.20 "Event Injection".
5378 */
5379 if (!pCtx->hwvirt.svm.fInterceptEvents)
5380 pCtx->hwvirt.svm.fInterceptEvents = 1;
5381 else
5382 {
5383 /*
5384 * Check and handle if the event being raised is intercepted.
5385 */
5386 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, pCtx, u8Vector, fFlags, uErr, uCr2);
5387 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5388 return rcStrict0;
5389 }
5390 }
5391#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5392
5393 /*
5394 * Do recursion accounting.
5395 */
5396 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5397 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5398 if (pVCpu->iem.s.cXcptRecursions == 0)
5399 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5400 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2));
5401 else
5402 {
5403 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5404 u8Vector, pCtx->cs.Sel, pCtx->rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5405 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5406
5407 if (pVCpu->iem.s.cXcptRecursions >= 3)
5408 {
5409#ifdef DEBUG_bird
5410 AssertFailed();
5411#endif
5412 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5413 }
5414
5415 /*
5416 * Evaluate the sequence of recurring events.
5417 */
5418 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5419 NULL /* pXcptRaiseInfo */);
5420 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5421 { /* likely */ }
5422 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5423 {
5424 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5425 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5426 u8Vector = X86_XCPT_DF;
5427 uErr = 0;
5428 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5429 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5430 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5431 }
5432 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5433 {
5434 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5435 return iemInitiateCpuShutdown(pVCpu);
5436 }
5437 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5438 {
5439 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5440 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5441 if (!CPUMIsGuestInNestedHwVirtMode(pCtx))
5442 return VERR_EM_GUEST_CPU_HANG;
5443 }
5444 else
5445 {
5446 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5447 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5448 return VERR_IEM_IPE_9;
5449 }
5450
5451 /*
5452 * The 'EXT' bit is set when an exception occurs during delivery of an external
5453 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5454 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5455 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5456 *
5457 * [1] - Intel spec. 6.13 "Error Code"
5458 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5459 * [3] - Intel Instruction reference for INT n.
5460 */
5461 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5462 && (fFlags & IEM_XCPT_FLAGS_ERR)
5463 && u8Vector != X86_XCPT_PF
5464 && u8Vector != X86_XCPT_DF)
5465 {
5466 uErr |= X86_TRAP_ERR_EXTERNAL;
5467 }
5468 }
5469
5470 pVCpu->iem.s.cXcptRecursions++;
5471 pVCpu->iem.s.uCurXcpt = u8Vector;
5472 pVCpu->iem.s.fCurXcpt = fFlags;
5473 pVCpu->iem.s.uCurXcptErr = uErr;
5474 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5475
5476 /*
5477 * Extensive logging.
5478 */
5479#if defined(LOG_ENABLED) && defined(IN_RING3)
5480 if (LogIs3Enabled())
5481 {
5482 PVM pVM = pVCpu->CTX_SUFF(pVM);
5483 char szRegs[4096];
5484 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5485 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5486 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5487 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5488 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5489 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5490 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5491 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5492 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5493 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5494 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5495 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5496 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5497 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5498 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5499 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5500 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5501 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5502 " efer=%016VR{efer}\n"
5503 " pat=%016VR{pat}\n"
5504 " sf_mask=%016VR{sf_mask}\n"
5505 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5506 " lstar=%016VR{lstar}\n"
5507 " star=%016VR{star} cstar=%016VR{cstar}\n"
5508 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5509 );
5510
5511 char szInstr[256];
5512 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5513 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5514 szInstr, sizeof(szInstr), NULL);
5515 Log3(("%s%s\n", szRegs, szInstr));
5516 }
5517#endif /* LOG_ENABLED */
5518
5519 /*
5520 * Call the mode specific worker function.
5521 */
5522 VBOXSTRICTRC rcStrict;
5523 if (!(pCtx->cr0 & X86_CR0_PE))
5524 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5525 else if (pCtx->msrEFER & MSR_K6_EFER_LMA)
5526 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5527 else
5528 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, pCtx, cbInstr, u8Vector, fFlags, uErr, uCr2);
5529
5530 /* Flush the prefetch buffer. */
5531#ifdef IEM_WITH_CODE_TLB
5532 pVCpu->iem.s.pbInstrBuf = NULL;
5533#else
5534 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5535#endif
5536
5537 /*
5538 * Unwind.
5539 */
5540 pVCpu->iem.s.cXcptRecursions--;
5541 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5542 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5543 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5544 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->esp, pVCpu->iem.s.uCpl,
5545 pVCpu->iem.s.cXcptRecursions + 1));
5546 return rcStrict;
5547}
5548
5549#ifdef IEM_WITH_SETJMP
5550/**
5551 * See iemRaiseXcptOrInt. Will not return.
5552 */
5553IEM_STATIC DECL_NO_RETURN(void)
5554iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5555 uint8_t cbInstr,
5556 uint8_t u8Vector,
5557 uint32_t fFlags,
5558 uint16_t uErr,
5559 uint64_t uCr2)
5560{
5561 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5562 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5563}
5564#endif
5565
5566
5567/** \#DE - 00. */
5568DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5569{
5570 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5571}
5572
5573
5574/** \#DB - 01.
5575 * @note This automatically clears DR7.GD. */
5576DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5577{
5578 /** @todo set/clear RF. */
5579 IEM_GET_CTX(pVCpu)->dr[7] &= ~X86_DR7_GD;
5580 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5581}
5582
5583
5584/** \#BR - 05. */
5585DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5586{
5587 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5588}
5589
5590
5591/** \#UD - 06. */
5592DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5593{
5594 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5595}
5596
5597
5598/** \#NM - 07. */
5599DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5600{
5601 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5602}
5603
5604
5605/** \#TS(err) - 0a. */
5606DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5607{
5608 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5609}
5610
5611
5612/** \#TS(tr) - 0a. */
5613DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5614{
5615 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5616 IEM_GET_CTX(pVCpu)->tr.Sel, 0);
5617}
5618
5619
5620/** \#TS(0) - 0a. */
5621DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5622{
5623 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5624 0, 0);
5625}
5626
5627
5628/** \#TS(err) - 0a. */
5629DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5630{
5631 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5632 uSel & X86_SEL_MASK_OFF_RPL, 0);
5633}
5634
5635
5636/** \#NP(err) - 0b. */
5637DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5638{
5639 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5640}
5641
5642
5643/** \#NP(sel) - 0b. */
5644DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5645{
5646 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5647 uSel & ~X86_SEL_RPL, 0);
5648}
5649
5650
5651/** \#SS(seg) - 0c. */
5652DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5653{
5654 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5655 uSel & ~X86_SEL_RPL, 0);
5656}
5657
5658
5659/** \#SS(err) - 0c. */
5660DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5661{
5662 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5663}
5664
5665
5666/** \#GP(n) - 0d. */
5667DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5668{
5669 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5670}
5671
5672
5673/** \#GP(0) - 0d. */
5674DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5675{
5676 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5677}
5678
5679#ifdef IEM_WITH_SETJMP
5680/** \#GP(0) - 0d. */
5681DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5682{
5683 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5684}
5685#endif
5686
5687
5688/** \#GP(sel) - 0d. */
5689DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5690{
5691 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5692 Sel & ~X86_SEL_RPL, 0);
5693}
5694
5695
5696/** \#GP(0) - 0d. */
5697DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5698{
5699 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5700}
5701
5702
5703/** \#GP(sel) - 0d. */
5704DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5705{
5706 NOREF(iSegReg); NOREF(fAccess);
5707 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5708 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5709}
5710
5711#ifdef IEM_WITH_SETJMP
5712/** \#GP(sel) - 0d, longjmp. */
5713DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5714{
5715 NOREF(iSegReg); NOREF(fAccess);
5716 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5717 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5718}
5719#endif
5720
5721/** \#GP(sel) - 0d. */
5722DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5723{
5724 NOREF(Sel);
5725 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5726}
5727
5728#ifdef IEM_WITH_SETJMP
5729/** \#GP(sel) - 0d, longjmp. */
5730DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5731{
5732 NOREF(Sel);
5733 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5734}
5735#endif
5736
5737
5738/** \#GP(sel) - 0d. */
5739DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5740{
5741 NOREF(iSegReg); NOREF(fAccess);
5742 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5743}
5744
5745#ifdef IEM_WITH_SETJMP
5746/** \#GP(sel) - 0d, longjmp. */
5747DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5748 uint32_t fAccess)
5749{
5750 NOREF(iSegReg); NOREF(fAccess);
5751 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5752}
5753#endif
5754
5755
5756/** \#PF(n) - 0e. */
5757DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5758{
5759 uint16_t uErr;
5760 switch (rc)
5761 {
5762 case VERR_PAGE_NOT_PRESENT:
5763 case VERR_PAGE_TABLE_NOT_PRESENT:
5764 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5765 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5766 uErr = 0;
5767 break;
5768
5769 default:
5770 AssertMsgFailed(("%Rrc\n", rc));
5771 RT_FALL_THRU();
5772 case VERR_ACCESS_DENIED:
5773 uErr = X86_TRAP_PF_P;
5774 break;
5775
5776 /** @todo reserved */
5777 }
5778
5779 if (pVCpu->iem.s.uCpl == 3)
5780 uErr |= X86_TRAP_PF_US;
5781
5782 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5783 && ( (IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_PAE)
5784 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) ) )
5785 uErr |= X86_TRAP_PF_ID;
5786
5787#if 0 /* This is so much non-sense, really. Why was it done like that? */
5788 /* Note! RW access callers reporting a WRITE protection fault, will clear
5789 the READ flag before calling. So, read-modify-write accesses (RW)
5790 can safely be reported as READ faults. */
5791 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5792 uErr |= X86_TRAP_PF_RW;
5793#else
5794 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5795 {
5796 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5797 uErr |= X86_TRAP_PF_RW;
5798 }
5799#endif
5800
5801 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5802 uErr, GCPtrWhere);
5803}
5804
5805#ifdef IEM_WITH_SETJMP
5806/** \#PF(n) - 0e, longjmp. */
5807IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5808{
5809 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5810}
5811#endif
5812
5813
5814/** \#MF(0) - 10. */
5815DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5816{
5817 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5818}
5819
5820
5821/** \#AC(0) - 11. */
5822DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5823{
5824 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5825}
5826
5827
5828/**
5829 * Macro for calling iemCImplRaiseDivideError().
5830 *
5831 * This enables us to add/remove arguments and force different levels of
5832 * inlining as we wish.
5833 *
5834 * @return Strict VBox status code.
5835 */
5836#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5837IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5838{
5839 NOREF(cbInstr);
5840 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5841}
5842
5843
5844/**
5845 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5846 *
5847 * This enables us to add/remove arguments and force different levels of
5848 * inlining as we wish.
5849 *
5850 * @return Strict VBox status code.
5851 */
5852#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5853IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5854{
5855 NOREF(cbInstr);
5856 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5857}
5858
5859
5860/**
5861 * Macro for calling iemCImplRaiseInvalidOpcode().
5862 *
5863 * This enables us to add/remove arguments and force different levels of
5864 * inlining as we wish.
5865 *
5866 * @return Strict VBox status code.
5867 */
5868#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5869IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5870{
5871 NOREF(cbInstr);
5872 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5873}
5874
5875
5876/** @} */
5877
5878
5879/*
5880 *
5881 * Helper routines.
5882 * Helper routines.
5883 * Helper routines.
5884 *
5885 */
5886
5887/**
5888 * Recalculates the effective operand size.
5889 *
5890 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5891 */
5892IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5893{
5894 switch (pVCpu->iem.s.enmCpuMode)
5895 {
5896 case IEMMODE_16BIT:
5897 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5898 break;
5899 case IEMMODE_32BIT:
5900 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5901 break;
5902 case IEMMODE_64BIT:
5903 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5904 {
5905 case 0:
5906 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5907 break;
5908 case IEM_OP_PRF_SIZE_OP:
5909 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5910 break;
5911 case IEM_OP_PRF_SIZE_REX_W:
5912 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5913 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5914 break;
5915 }
5916 break;
5917 default:
5918 AssertFailed();
5919 }
5920}
5921
5922
5923/**
5924 * Sets the default operand size to 64-bit and recalculates the effective
5925 * operand size.
5926 *
5927 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5928 */
5929IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
5930{
5931 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
5932 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
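    /* REX.W takes precedence over the 0x66 operand size prefix, so only a lone
       0x66 yields a 16-bit operand size; everything else stays 64-bit. */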
5933 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
5934 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5935 else
5936 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5937}
5938
5939
5940/*
5941 *
5942 * Common opcode decoders.
5943 * Common opcode decoders.
5944 * Common opcode decoders.
5945 *
5946 */
5947//#include <iprt/mem.h>
5948
5949/**
5950 * Used to add extra details about a stub case.
5951 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5952 */
5953IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
5954{
5955#if defined(LOG_ENABLED) && defined(IN_RING3)
5956 PVM pVM = pVCpu->CTX_SUFF(pVM);
5957 char szRegs[4096];
5958 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5959 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5960 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5961 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5962 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5963 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5964 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5965 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5966 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5967 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5968 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5969 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5970 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5971 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5972 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5973 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5974 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5975 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5976 " efer=%016VR{efer}\n"
5977 " pat=%016VR{pat}\n"
5978 " sf_mask=%016VR{sf_mask}\n"
5979 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5980 " lstar=%016VR{lstar}\n"
5981 " star=%016VR{star} cstar=%016VR{cstar}\n"
5982 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5983 );
5984
5985 char szInstr[256];
5986 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5987 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5988 szInstr, sizeof(szInstr), NULL);
5989
5990 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
5991#else
5992 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip);
5993#endif
5994}
5995
5996/**
5997 * Complains about a stub.
5998 *
5999 * Providing two versions of this macro, one for daily use and one for use when
6000 * working on IEM.
6001 */
6002#if 0
6003# define IEMOP_BITCH_ABOUT_STUB() \
6004 do { \
6005 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6006 iemOpStubMsg2(pVCpu); \
6007 RTAssertPanic(); \
6008 } while (0)
6009#else
6010# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6011#endif
6012
6013/** Stubs an opcode. */
6014#define FNIEMOP_STUB(a_Name) \
6015 FNIEMOP_DEF(a_Name) \
6016 { \
6017 RT_NOREF_PV(pVCpu); \
6018 IEMOP_BITCH_ABOUT_STUB(); \
6019 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6020 } \
6021 typedef int ignore_semicolon
6022
6023/** Stubs an opcode. */
6024#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6025 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6026 { \
6027 RT_NOREF_PV(pVCpu); \
6028 RT_NOREF_PV(a_Name0); \
6029 IEMOP_BITCH_ABOUT_STUB(); \
6030 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6031 } \
6032 typedef int ignore_semicolon
6033
6034/** Stubs an opcode which currently should raise \#UD. */
6035#define FNIEMOP_UD_STUB(a_Name) \
6036 FNIEMOP_DEF(a_Name) \
6037 { \
6038 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6039 return IEMOP_RAISE_INVALID_OPCODE(); \
6040 } \
6041 typedef int ignore_semicolon
6042
6043/** Stubs an opcode which currently should raise \#UD. */
6044#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6045 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6046 { \
6047 RT_NOREF_PV(pVCpu); \
6048 RT_NOREF_PV(a_Name0); \
6049 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6050 return IEMOP_RAISE_INVALID_OPCODE(); \
6051 } \
6052 typedef int ignore_semicolon
6053
6054
6055
6056/** @name Register Access.
6057 * @{
6058 */
6059
6060/**
6061 * Gets a reference (pointer) to the specified hidden segment register.
6062 *
6063 * @returns Hidden register reference.
6064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6065 * @param iSegReg The segment register.
6066 */
6067IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6068{
6069 Assert(iSegReg < X86_SREG_COUNT);
6070 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6071 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6072 PCPUMSELREG pSReg = &pCtx->aSRegs[iSegReg];
6073
6074#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6075 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6076 { /* likely */ }
6077 else
6078 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6079#else
6080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6081#endif
6082 return pSReg;
6083}
6084
6085
6086/**
6087 * Ensures that the given hidden segment register is up to date.
6088 *
6089 * @returns Hidden register reference.
6090 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6091 * @param pSReg The segment register.
6092 */
6093IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6094{
6095#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6096 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6097 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6098#else
6099 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6100 NOREF(pVCpu);
6101#endif
6102 return pSReg;
6103}
6104
6105
6106/**
6107 * Gets a reference (pointer) to the specified segment register (the selector
6108 * value).
6109 *
6110 * @returns Pointer to the selector variable.
6111 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6112 * @param iSegReg The segment register.
6113 */
6114DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6115{
6116 Assert(iSegReg < X86_SREG_COUNT);
6117 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6118 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6119 return &pCtx->aSRegs[iSegReg].Sel;
6120}
6121
6122
6123/**
6124 * Fetches the selector value of a segment register.
6125 *
6126 * @returns The selector value.
6127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6128 * @param iSegReg The segment register.
6129 */
6130DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6131{
6132 Assert(iSegReg < X86_SREG_COUNT);
6133 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6134 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6135 return pCtx->aSRegs[iSegReg].Sel;
6136}
6137
6138
6139/**
6140 * Fetches the base address value of a segment register.
6141 *
6142 * @returns The segment base address value.
6143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6144 * @param iSegReg The segment register.
6145 */
6146DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6147{
6148 Assert(iSegReg < X86_SREG_COUNT);
6149 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6150 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6151 return pCtx->aSRegs[iSegReg].u64Base;
6152}
6153
6154
6155/**
6156 * Gets a reference (pointer) to the specified general purpose register.
6157 *
6158 * @returns Register reference.
6159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6160 * @param iReg The general purpose register.
6161 */
6162DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6163{
6164 Assert(iReg < 16);
6165 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6166 return &pCtx->aGRegs[iReg];
6167}
6168
6169
6170/**
6171 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6172 *
6173 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6174 *
6175 * @returns Register reference.
6176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6177 * @param iReg The register.
6178 */
6179DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6180{
6181 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6182 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6183 {
6184 Assert(iReg < 16);
6185 return &pCtx->aGRegs[iReg].u8;
6186 }
6187 /* high 8-bit register. */
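    /* Without REX, encodings 4-7 select AH/CH/DH/BH, i.e. bits 15:8 of
       rAX/rCX/rDX/rBX - e.g. iReg=4 (AH) resolves to aGRegs[0].bHi. */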
6188 Assert(iReg < 8);
6189 return &pCtx->aGRegs[iReg & 3].bHi;
6190}
6191
6192
6193/**
6194 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6195 *
6196 * @returns Register reference.
6197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6198 * @param iReg The register.
6199 */
6200DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6201{
6202 Assert(iReg < 16);
6203 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6204 return &pCtx->aGRegs[iReg].u16;
6205}
6206
6207
6208/**
6209 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6210 *
6211 * @returns Register reference.
6212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6213 * @param iReg The register.
6214 */
6215DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6216{
6217 Assert(iReg < 16);
6218 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6219 return &pCtx->aGRegs[iReg].u32;
6220}
6221
6222
6223/**
6224 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6225 *
6226 * @returns Register reference.
6227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6228 * @param iReg The register.
6229 */
6230DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6231{
6232 Assert(iReg < 16);
6233 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6234 return &pCtx->aGRegs[iReg].u64;
6235}
6236
6237
6238/**
6239 * Gets a reference (pointer) to the specified segment register's base address.
6240 *
6241 * @returns Segment register base address reference.
6242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6243 * @param iSegReg The segment selector.
6244 */
6245DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6246{
6247 Assert(iSegReg < X86_SREG_COUNT);
6248 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6249 IEM_CTX_ASSERT(pCtx, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6250 return &pCtx->aSRegs[iSegReg].u64Base;
6251}
6252
6253
6254/**
6255 * Fetches the value of an 8-bit general purpose register.
6256 *
6257 * @returns The register value.
6258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6259 * @param iReg The register.
6260 */
6261DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6262{
6263 return *iemGRegRefU8(pVCpu, iReg);
6264}
6265
6266
6267/**
6268 * Fetches the value of a 16-bit general purpose register.
6269 *
6270 * @returns The register value.
6271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6272 * @param iReg The register.
6273 */
6274DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6275{
6276 Assert(iReg < 16);
6277 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u16;
6278}
6279
6280
6281/**
6282 * Fetches the value of a 32-bit general purpose register.
6283 *
6284 * @returns The register value.
6285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6286 * @param iReg The register.
6287 */
6288DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6289{
6290 Assert(iReg < 16);
6291 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u32;
6292}
6293
6294
6295/**
6296 * Fetches the value of a 64-bit general purpose register.
6297 *
6298 * @returns The register value.
6299 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6300 * @param iReg The register.
6301 */
6302DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6303{
6304 Assert(iReg < 16);
6305 return IEM_GET_CTX(pVCpu)->aGRegs[iReg].u64;
6306}
6307
6308
6309/**
6310 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6311 *
6312 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6313 * segment limit.
6314 *
 * @returns Strict VBox status code.
6315 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6316 * @param offNextInstr The offset of the next instruction.
6317 */
6318IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6319{
6320 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6321 switch (pVCpu->iem.s.enmEffOpSize)
6322 {
6323 case IEMMODE_16BIT:
6324 {
6325 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6326 if ( uNewIp > pCtx->cs.u32Limit
6327 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6328 return iemRaiseGeneralProtectionFault0(pVCpu);
6329 pCtx->rip = uNewIp;
6330 break;
6331 }
6332
6333 case IEMMODE_32BIT:
6334 {
6335 Assert(pCtx->rip <= UINT32_MAX);
6336 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6337
6338 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6339 if (uNewEip > pCtx->cs.u32Limit)
6340 return iemRaiseGeneralProtectionFault0(pVCpu);
6341 pCtx->rip = uNewEip;
6342 break;
6343 }
6344
6345 case IEMMODE_64BIT:
6346 {
6347 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6348
6349 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6350 if (!IEM_IS_CANONICAL(uNewRip))
6351 return iemRaiseGeneralProtectionFault0(pVCpu);
6352 pCtx->rip = uNewRip;
6353 break;
6354 }
6355
6356 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6357 }
6358
6359 pCtx->eflags.Bits.u1RF = 0;
6360
6361#ifndef IEM_WITH_CODE_TLB
6362 /* Flush the prefetch buffer. */
6363 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6364#endif
6365
6366 return VINF_SUCCESS;
6367}
6368
6369
6370/**
6371 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6372 *
6373 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6374 * segment limit.
6375 *
6376 * @returns Strict VBox status code.
6377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6378 * @param offNextInstr The offset of the next instruction.
6379 */
6380IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6381{
6382 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6383 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6384
6385 uint16_t uNewIp = pCtx->ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6386 if ( uNewIp > pCtx->cs.u32Limit
6387 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6388 return iemRaiseGeneralProtectionFault0(pVCpu);
6389 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6390 pCtx->rip = uNewIp;
6391 pCtx->eflags.Bits.u1RF = 0;
6392
6393#ifndef IEM_WITH_CODE_TLB
6394 /* Flush the prefetch buffer. */
6395 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6396#endif
6397
6398 return VINF_SUCCESS;
6399}
6400
6401
6402/**
6403 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6404 *
6405 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6406 * segment limit.
6407 *
6408 * @returns Strict VBox status code.
6409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6410 * @param offNextInstr The offset of the next instruction.
6411 */
6412IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6413{
6414 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6415 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6416
6417 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6418 {
6419 Assert(pCtx->rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6420
6421 uint32_t uNewEip = pCtx->eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6422 if (uNewEip > pCtx->cs.u32Limit)
6423 return iemRaiseGeneralProtectionFault0(pVCpu);
6424 pCtx->rip = uNewEip;
6425 }
6426 else
6427 {
6428 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6429
6430 uint64_t uNewRip = pCtx->rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6431 if (!IEM_IS_CANONICAL(uNewRip))
6432 return iemRaiseGeneralProtectionFault0(pVCpu);
6433 pCtx->rip = uNewRip;
6434 }
6435 pCtx->eflags.Bits.u1RF = 0;
6436
6437#ifndef IEM_WITH_CODE_TLB
6438 /* Flush the prefetch buffer. */
6439 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6440#endif
6441
6442 return VINF_SUCCESS;
6443}
6444
6445
6446/**
6447 * Performs a near jump to the specified address.
6448 *
6449 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6450 * segment limit.
6451 *
 * @returns Strict VBox status code.
6452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6453 * @param uNewRip The new RIP value.
6454 */
6455IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6456{
6457 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6458 switch (pVCpu->iem.s.enmEffOpSize)
6459 {
6460 case IEMMODE_16BIT:
6461 {
6462 Assert(uNewRip <= UINT16_MAX);
6463 if ( uNewRip > pCtx->cs.u32Limit
6464 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6465 return iemRaiseGeneralProtectionFault0(pVCpu);
6466 /** @todo Test 16-bit jump in 64-bit mode. */
6467 pCtx->rip = uNewRip;
6468 break;
6469 }
6470
6471 case IEMMODE_32BIT:
6472 {
6473 Assert(uNewRip <= UINT32_MAX);
6474 Assert(pCtx->rip <= UINT32_MAX);
6475 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6476
6477 if (uNewRip > pCtx->cs.u32Limit)
6478 return iemRaiseGeneralProtectionFault0(pVCpu);
6479 pCtx->rip = uNewRip;
6480 break;
6481 }
6482
6483 case IEMMODE_64BIT:
6484 {
6485 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6486
6487 if (!IEM_IS_CANONICAL(uNewRip))
6488 return iemRaiseGeneralProtectionFault0(pVCpu);
6489 pCtx->rip = uNewRip;
6490 break;
6491 }
6492
6493 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6494 }
6495
6496 pCtx->eflags.Bits.u1RF = 0;
6497
6498#ifndef IEM_WITH_CODE_TLB
6499 /* Flush the prefetch buffer. */
6500 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6501#endif
6502
6503 return VINF_SUCCESS;
6504}
6505
6506
6507/**
6508 * Gets the address of the top of the stack.
6509 *
 * @returns The current top of stack address (SP, ESP or RSP depending on the mode).
6510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6511 * @param pCtx The CPU context which SP/ESP/RSP should be
6512 * read.
6513 */
6514DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu, PCCPUMCTX pCtx)
6515{
6516 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6517 return pCtx->rsp;
6518 if (pCtx->ss.Attr.n.u1DefBig)
6519 return pCtx->esp;
6520 return pCtx->sp;
6521}
6522
6523
6524/**
6525 * Updates the RIP/EIP/IP to point to the next instruction.
6526 *
6527 * This function leaves the EFLAGS.RF flag alone.
6528 *
6529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6530 * @param cbInstr The number of bytes to add.
6531 */
6532IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6533{
6534 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6535 switch (pVCpu->iem.s.enmCpuMode)
6536 {
6537 case IEMMODE_16BIT:
6538 Assert(pCtx->rip <= UINT16_MAX);
6539 pCtx->eip += cbInstr;
6540 pCtx->eip &= UINT32_C(0xffff);
6541 break;
6542
6543 case IEMMODE_32BIT:
6544 pCtx->eip += cbInstr;
6545 Assert(pCtx->rip <= UINT32_MAX);
6546 break;
6547
6548 case IEMMODE_64BIT:
6549 pCtx->rip += cbInstr;
6550 break;
6551 default: AssertFailed();
6552 }
6553}
6554
6555
6556#if 0
6557/**
6558 * Updates the RIP/EIP/IP to point to the next instruction.
6559 *
6560 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6561 */
6562IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6563{
6564 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6565}
6566#endif
6567
6568
6569
6570/**
6571 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6572 *
6573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6574 * @param cbInstr The number of bytes to add.
6575 */
6576IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6577{
6578 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6579
6580 pCtx->eflags.Bits.u1RF = 0;
6581
6582 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6583#if ARCH_BITS >= 64
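/* The table below is indexed by IEMMODE (16/32/64-bit): 16-bit and 32-bit code keep only the
   low 32 bits of the updated RIP, 64-bit code keeps all bits.  (IP wrapping at 64K is not done
   here; the assertion below checks that RIP already fits the mask for the current mode.) */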
6584 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6585 Assert(pCtx->rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6586 pCtx->rip = (pCtx->rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6587#else
6588 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6589 pCtx->rip += cbInstr;
6590 else
6591 pCtx->eip += cbInstr;
6592#endif
6593}
6594
6595
6596/**
6597 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6598 *
6599 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6600 */
6601IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6602{
6603 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6604}
6605
6606
6607/**
6608 * Adds to the stack pointer.
6609 *
6610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6611 * @param pCtx The CPU context which SP/ESP/RSP should be
6612 * updated.
6613 * @param cbToAdd The number of bytes to add (8-bit!).
6614 */
6615DECLINLINE(void) iemRegAddToRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToAdd)
6616{
6617 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6618 pCtx->rsp += cbToAdd;
6619 else if (pCtx->ss.Attr.n.u1DefBig)
6620 pCtx->esp += cbToAdd;
6621 else
6622 pCtx->sp += cbToAdd;
6623}
6624
6625
6626/**
6627 * Subtracts from the stack pointer.
6628 *
6629 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6630 * @param pCtx The CPU context which SP/ESP/RSP should be
6631 * updated.
6632 * @param cbToSub The number of bytes to subtract (8-bit!).
6633 */
6634DECLINLINE(void) iemRegSubFromRsp(PCVMCPU pVCpu, PCPUMCTX pCtx, uint8_t cbToSub)
6635{
6636 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6637 pCtx->rsp -= cbToSub;
6638 else if (pCtx->ss.Attr.n.u1DefBig)
6639 pCtx->esp -= cbToSub;
6640 else
6641 pCtx->sp -= cbToSub;
6642}
6643
6644
6645/**
6646 * Adds to the temporary stack pointer.
6647 *
6648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6649 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6650 * @param cbToAdd The number of bytes to add (16-bit).
6651 * @param pCtx Where to get the current stack mode.
6652 */
6653DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6654{
6655 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6656 pTmpRsp->u += cbToAdd;
6657 else if (pCtx->ss.Attr.n.u1DefBig)
6658 pTmpRsp->DWords.dw0 += cbToAdd;
6659 else
6660 pTmpRsp->Words.w0 += cbToAdd;
6661}
6662
6663
6664/**
6665 * Subtracts from the temporary stack pointer.
6666 *
6667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6668 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6669 * @param cbToSub The number of bytes to subtract.
6670 * @param pCtx Where to get the current stack mode.
6671 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6672 * expecting that.
6673 */
6674DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6675{
6676 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6677 pTmpRsp->u -= cbToSub;
6678 else if (pCtx->ss.Attr.n.u1DefBig)
6679 pTmpRsp->DWords.dw0 -= cbToSub;
6680 else
6681 pTmpRsp->Words.w0 -= cbToSub;
6682}
6683
6684
6685/**
6686 * Calculates the effective stack address for a push of the specified size as
6687 * well as the new RSP value (upper bits may be masked).
6688 *
6689 * @returns Effective stack address for the push.
6690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6691 * @param pCtx Where to get the current stack mode.
6692 * @param cbItem The size of the stack item to push.
6693 * @param puNewRsp Where to return the new RSP value.
6694 */
6695DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6696{
6697 RTUINT64U uTmpRsp;
6698 RTGCPTR GCPtrTop;
6699 uTmpRsp.u = pCtx->rsp;
6700
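 /* Note: assigning the result of the compound subtraction means GCPtrTop picks up the value
    already masked to the current stack width, while *puNewRsp below returns the full 64-bit
    union with the untouched upper bits preserved. */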
6701 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6702 GCPtrTop = uTmpRsp.u -= cbItem;
6703 else if (pCtx->ss.Attr.n.u1DefBig)
6704 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6705 else
6706 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6707 *puNewRsp = uTmpRsp.u;
6708 return GCPtrTop;
6709}
6710
6711
6712/**
6713 * Gets the current stack pointer and calculates the value after a pop of the
6714 * specified size.
6715 *
6716 * @returns Current stack pointer.
6717 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6718 * @param pCtx Where to get the current stack mode.
6719 * @param cbItem The size of the stack item to pop.
6720 * @param puNewRsp Where to return the new RSP value.
6721 */
6722DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, PCCPUMCTX pCtx, uint8_t cbItem, uint64_t *puNewRsp)
6723{
6724 RTUINT64U uTmpRsp;
6725 RTGCPTR GCPtrTop;
6726 uTmpRsp.u = pCtx->rsp;
6727
6728 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6729 {
6730 GCPtrTop = uTmpRsp.u;
6731 uTmpRsp.u += cbItem;
6732 }
6733 else if (pCtx->ss.Attr.n.u1DefBig)
6734 {
6735 GCPtrTop = uTmpRsp.DWords.dw0;
6736 uTmpRsp.DWords.dw0 += cbItem;
6737 }
6738 else
6739 {
6740 GCPtrTop = uTmpRsp.Words.w0;
6741 uTmpRsp.Words.w0 += cbItem;
6742 }
6743 *puNewRsp = uTmpRsp.u;
6744 return GCPtrTop;
6745}
6746
6747
6748/**
6749 * Calculates the effective stack address for a push of the specified size as
6750 * well as the new temporary RSP value (upper bits may be masked).
6751 *
6752 * @returns Effective stack address for the push.
6753 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6754 * @param pCtx Where to get the current stack mode.
6755 * @param pTmpRsp The temporary stack pointer. This is updated.
6756 * @param cbItem The size of the stack item to push.
6757 */
6758DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6759{
6760 RTGCPTR GCPtrTop;
6761
6762 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6763 GCPtrTop = pTmpRsp->u -= cbItem;
6764 else if (pCtx->ss.Attr.n.u1DefBig)
6765 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6766 else
6767 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6768 return GCPtrTop;
6769}
6770
6771
6772/**
6773 * Gets the effective stack address for a pop of the specified size and
6774 * calculates and updates the temporary RSP.
6775 *
6776 * @returns Current stack pointer.
6777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6778 * @param pCtx Where to get the current stack mode.
6779 * @param pTmpRsp The temporary stack pointer. This is updated.
6780 * @param cbItem The size of the stack item to pop.
6781 */
6782DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PCCPUMCTX pCtx, PRTUINT64U pTmpRsp, uint8_t cbItem)
6783{
6784 RTGCPTR GCPtrTop;
6785 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6786 {
6787 GCPtrTop = pTmpRsp->u;
6788 pTmpRsp->u += cbItem;
6789 }
6790 else if (pCtx->ss.Attr.n.u1DefBig)
6791 {
6792 GCPtrTop = pTmpRsp->DWords.dw0;
6793 pTmpRsp->DWords.dw0 += cbItem;
6794 }
6795 else
6796 {
6797 GCPtrTop = pTmpRsp->Words.w0;
6798 pTmpRsp->Words.w0 += cbItem;
6799 }
6800 return GCPtrTop;
6801}
6802
6803/** @} */
6804
6805
6806/** @name FPU access and helpers.
6807 *
6808 * @{
6809 */
6810
6811
6812/**
6813 * Hook for preparing to use the host FPU.
6814 *
6815 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6816 *
6817 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6818 */
6819DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6820{
6821#ifdef IN_RING3
6822 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6823#else
6824 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6825#endif
6826 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6827 IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6828}
6829
6830
6831/**
6832 * Hook for preparing to use the host FPU for SSE.
6833 *
6834 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6835 *
6836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6837 */
6838DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6839{
6840 iemFpuPrepareUsage(pVCpu);
6841}
6842
6843
6844/**
6845 * Hook for preparing to use the host FPU for AVX.
6846 *
6847 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6848 *
6849 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6850 */
6851DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6852{
6853 iemFpuPrepareUsage(pVCpu);
6854}
6855
6856
6857/**
6858 * Hook for actualizing the guest FPU state before the interpreter reads it.
6859 *
6860 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6861 *
6862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6863 */
6864DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6865{
6866#ifdef IN_RING3
6867 NOREF(pVCpu);
6868#else
6869 CPUMRZFpuStateActualizeForRead(pVCpu);
6870#endif
6871 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6872 IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6873}
6874
6875
6876/**
6877 * Hook for actualizing the guest FPU state before the interpreter changes it.
6878 *
6879 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6880 *
6881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6882 */
6883DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6884{
6885#ifdef IN_RING3
6886 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6887#else
6888 CPUMRZFpuStateActualizeForChange(pVCpu);
6889#endif
6890 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6891 IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6892}
6893
6894
6895/**
6896 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6897 * only.
6898 *
6899 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6900 *
6901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6902 */
6903DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6904{
6905#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6906 NOREF(pVCpu);
6907#else
6908 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6909#endif
6910 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6911 IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6912}
6913
6914
6915/**
6916 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6917 * read+write.
6918 *
6919 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6920 *
6921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6922 */
6923DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6924{
6925#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6926 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6927#else
6928 CPUMRZFpuStateActualizeForChange(pVCpu);
6929#endif
6930 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6931 IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6932}
6933
6934
6935/**
6936 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6937 * only.
6938 *
6939 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6940 *
6941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6942 */
6943DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6944{
6945#ifdef IN_RING3
6946 NOREF(pVCpu);
6947#else
6948 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6949#endif
6950 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6951 IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6952}
6953
6954
6955/**
6956 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6957 * read+write.
6958 *
6959 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6960 *
6961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6962 */
6963DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
6964{
6965#ifdef IN_RING3
6966 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6967#else
6968 CPUMRZFpuStateActualizeForChange(pVCpu);
6969#endif
6970 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
6971 IEM_CTX_IMPORT_NORET(pVCpu, pCtx, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6972}
6973
6974
6975/**
6976 * Stores a QNaN value into a FPU register.
6977 *
6978 * @param pReg Pointer to the register.
6979 */
6980DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
6981{
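 /* This is the x87 "indefinite" QNaN: sign=1, exponent=0x7fff, mantissa=0xC000000000000000. */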
6982 pReg->au32[0] = UINT32_C(0x00000000);
6983 pReg->au32[1] = UINT32_C(0xc0000000);
6984 pReg->au16[4] = UINT16_C(0xffff);
6985}
6986
6987
6988/**
6989 * Updates the FOP, FPU.CS and FPUIP registers.
6990 *
6991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6992 * @param pCtx The CPU context.
6993 * @param pFpuCtx The FPU context.
6994 */
6995DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx)
6996{
6997 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
6998 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
6999 /** @todo x87.CS and FPUIP need to be kept separately. */
7000 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7001 {
7002 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP
7003 * are handled in real mode, based on the fnsave and fnstenv images. */
7004 pFpuCtx->CS = 0;
7005 pFpuCtx->FPUIP = pCtx->eip | ((uint32_t)pCtx->cs.Sel << 4);
7006 }
7007 else
7008 {
7009 pFpuCtx->CS = pCtx->cs.Sel;
7010 pFpuCtx->FPUIP = pCtx->rip;
7011 }
7012}
7013
7014
7015/**
7016 * Updates the x87.DS and FPUDP registers.
7017 *
7018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7019 * @param pCtx The CPU context.
7020 * @param pFpuCtx The FPU context.
7021 * @param iEffSeg The effective segment register.
7022 * @param GCPtrEff The effective address relative to @a iEffSeg.
7023 */
7024DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PCPUMCTX pCtx, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7025{
7026 RTSEL sel;
7027 switch (iEffSeg)
7028 {
7029 case X86_SREG_DS: sel = pCtx->ds.Sel; break;
7030 case X86_SREG_SS: sel = pCtx->ss.Sel; break;
7031 case X86_SREG_CS: sel = pCtx->cs.Sel; break;
7032 case X86_SREG_ES: sel = pCtx->es.Sel; break;
7033 case X86_SREG_FS: sel = pCtx->fs.Sel; break;
7034 case X86_SREG_GS: sel = pCtx->gs.Sel; break;
7035 default:
7036 AssertMsgFailed(("%d\n", iEffSeg));
7037 sel = pCtx->ds.Sel;
7038 }
7039 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7040 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7041 {
7042 pFpuCtx->DS = 0;
7043 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7044 }
7045 else
7046 {
7047 pFpuCtx->DS = sel;
7048 pFpuCtx->FPUDP = GCPtrEff;
7049 }
7050}
7051
7052
7053/**
7054 * Rotates the stack registers in the push direction.
7055 *
7056 * @param pFpuCtx The FPU context.
7057 * @remarks This is a complete waste of time, but fxsave stores the registers in
7058 * stack order.
7059 */
7060DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7061{
7062 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7063 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7064 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7065 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7066 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7067 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7068 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7069 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7070 pFpuCtx->aRegs[0].r80 = r80Tmp;
7071}
7072
7073
7074/**
7075 * Rotates the stack registers in the pop direction.
7076 *
7077 * @param pFpuCtx The FPU context.
7078 * @remarks This is a complete waste of time, but fxsave stores the registers in
7079 * stack order.
7080 */
7081DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7082{
7083 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7084 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7085 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7086 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7087 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7088 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7089 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7090 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7091 pFpuCtx->aRegs[7].r80 = r80Tmp;
7092}
7093
7094
7095/**
7096 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7097 * exception prevents it.
7098 *
7099 * @param pResult The FPU operation result to push.
7100 * @param pFpuCtx The FPU context.
7101 */
7102IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7103{
7104 /* Update FSW and bail if there are pending exceptions afterwards. */
7105 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7106 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
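 /* The IE/DE/ZE status bits in FSW and the IM/DM/ZM mask bits in FCW occupy the same bit
    positions, so ANDing with the inverted FCW bits leaves only unmasked exceptions. */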
7107 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7108 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7109 {
7110 pFpuCtx->FSW = fFsw;
7111 return;
7112 }
7113
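 /* TOP is a 3-bit field; adding 7 and masking is a decrement modulo 8, i.e. the register slot
    the push would land in. */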
7114 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7115 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7116 {
7117 /* All is fine, push the actual value. */
7118 pFpuCtx->FTW |= RT_BIT(iNewTop);
7119 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7120 }
7121 else if (pFpuCtx->FCW & X86_FCW_IM)
7122 {
7123 /* Masked stack overflow, push QNaN. */
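 /* (C1 set together with SF indicates that the stack fault was an overflow.) */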
7124 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7125 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7126 }
7127 else
7128 {
7129 /* Raise stack overflow, don't push anything. */
7130 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7131 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7132 return;
7133 }
7134
7135 fFsw &= ~X86_FSW_TOP_MASK;
7136 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7137 pFpuCtx->FSW = fFsw;
7138
7139 iemFpuRotateStackPush(pFpuCtx);
7140}
7141
7142
7143/**
7144 * Stores a result in a FPU register and updates the FSW and FTW.
7145 *
7146 * @param pFpuCtx The FPU context.
7147 * @param pResult The result to store.
7148 * @param iStReg Which FPU register to store it in.
7149 */
7150IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7151{
7152 Assert(iStReg < 8);
7153 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7154 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7155 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7156 pFpuCtx->FTW |= RT_BIT(iReg);
7157 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7158}
7159
7160
7161/**
7162 * Only updates the FPU status word (FSW) with the result of the current
7163 * instruction.
7164 *
7165 * @param pFpuCtx The FPU context.
7166 * @param u16FSW The FSW output of the current instruction.
7167 */
7168IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7169{
7170 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7171 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7172}
7173
7174
7175/**
7176 * Pops one item off the FPU stack if no pending exception prevents it.
7177 *
7178 * @param pFpuCtx The FPU context.
7179 */
7180IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7181{
7182 /* Check pending exceptions. */
7183 uint16_t uFSW = pFpuCtx->FSW;
7184 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7185 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7186 return;
7187
7188 /* TOP = TOP + 1 (pop); adding 9 to the 3-bit TOP field and masking increments it modulo 8. */
7189 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7190 uFSW &= ~X86_FSW_TOP_MASK;
7191 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7192 pFpuCtx->FSW = uFSW;
7193
7194 /* Mark the previous ST0 as empty. */
7195 iOldTop >>= X86_FSW_TOP_SHIFT;
7196 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7197
7198 /* Rotate the registers. */
7199 iemFpuRotateStackPop(pFpuCtx);
7200}
7201
7202
7203/**
7204 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7205 *
7206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7207 * @param pResult The FPU operation result to push.
7208 */
7209IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7210{
7211 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7212 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7213 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7214 iemFpuMaybePushResult(pResult, pFpuCtx);
7215}
7216
7217
7218/**
7219 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7220 * and sets FPUDP and FPUDS.
7221 *
7222 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7223 * @param pResult The FPU operation result to push.
7224 * @param iEffSeg The effective segment register.
7225 * @param GCPtrEff The effective address relative to @a iEffSeg.
7226 */
7227IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7228{
7229 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7230 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7231 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7232 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7233 iemFpuMaybePushResult(pResult, pFpuCtx);
7234}
7235
7236
7237/**
7238 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7239 * unless a pending exception prevents it.
7240 *
7241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7242 * @param pResult The FPU operation result to store and push.
7243 */
7244IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7245{
7246 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7247 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7248 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7249
7250 /* Update FSW and bail if there are pending exceptions afterwards. */
7251 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7252 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7253 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7254 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7255 {
7256 pFpuCtx->FSW = fFsw;
7257 return;
7258 }
7259
7260 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7261 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7262 {
7263 /* All is fine, push the actual value. */
7264 pFpuCtx->FTW |= RT_BIT(iNewTop);
7265 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7266 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7267 }
7268 else if (pFpuCtx->FCW & X86_FCW_IM)
7269 {
7270 /* Masked stack overflow, push QNaN. */
7271 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7272 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7273 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7274 }
7275 else
7276 {
7277 /* Raise stack overflow, don't push anything. */
7278 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7279 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7280 return;
7281 }
7282
7283 fFsw &= ~X86_FSW_TOP_MASK;
7284 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7285 pFpuCtx->FSW = fFsw;
7286
7287 iemFpuRotateStackPush(pFpuCtx);
7288}
7289
7290
7291/**
7292 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7293 * FOP.
7294 *
7295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7296 * @param pResult The result to store.
7297 * @param iStReg Which FPU register to store it in.
7298 */
7299IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7300{
7301 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7302 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7303 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7304 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7305}
7306
7307
7308/**
7309 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7310 * FOP, and then pops the stack.
7311 *
7312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7313 * @param pResult The result to store.
7314 * @param iStReg Which FPU register to store it in.
7315 */
7316IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7317{
7318 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7319 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7320 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7321 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7322 iemFpuMaybePopOne(pFpuCtx);
7323}
7324
7325
7326/**
7327 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7328 * FPUDP, and FPUDS.
7329 *
7330 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7331 * @param pResult The result to store.
7332 * @param iStReg Which FPU register to store it in.
7333 * @param iEffSeg The effective memory operand selector register.
7334 * @param GCPtrEff The effective memory operand offset.
7335 */
7336IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7337 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7338{
7339 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7340 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7341 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7342 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7343 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7344}
7345
7346
7347/**
7348 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7349 * FPUDP, and FPUDS, and then pops the stack.
7350 *
7351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7352 * @param pResult The result to store.
7353 * @param iStReg Which FPU register to store it in.
7354 * @param iEffSeg The effective memory operand selector register.
7355 * @param GCPtrEff The effective memory operand offset.
7356 */
7357IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7358 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7359{
7360 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7361 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7362 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7363 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7364 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7365 iemFpuMaybePopOne(pFpuCtx);
7366}
7367
7368
7369/**
7370 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7371 *
7372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7373 */
7374IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7375{
7376 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7377 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7378 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7379}
7380
7381
7382/**
7383 * Marks the specified stack register as free (for FFREE).
7384 *
7385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7386 * @param iStReg The register to free.
7387 */
7388IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7389{
7390 Assert(iStReg < 8);
7391 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7392 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7393 pFpuCtx->FTW &= ~RT_BIT(iReg);
7394}
7395
7396
7397/**
7398 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7399 *
7400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7401 */
7402IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7403{
7404 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7405 uint16_t uFsw = pFpuCtx->FSW;
7406 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7407 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7408 uFsw &= ~X86_FSW_TOP_MASK;
7409 uFsw |= uTop;
7410 pFpuCtx->FSW = uFsw;
7411}
7412
7413
7414/**
7415 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7416 *
7417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7418 */
7419IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7420{
7421 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7422 uint16_t uFsw = pFpuCtx->FSW;
7423 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7424 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7425 uFsw &= ~X86_FSW_TOP_MASK;
7426 uFsw |= uTop;
7427 pFpuCtx->FSW = uFsw;
7428}
7429
7430
7431/**
7432 * Updates the FSW, FOP, FPUIP, and FPUCS.
7433 *
7434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7435 * @param u16FSW The FSW from the current instruction.
7436 */
7437IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7438{
7439 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7440 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7441 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7442 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7443}
7444
7445
7446/**
7447 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7448 *
7449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7450 * @param u16FSW The FSW from the current instruction.
7451 */
7452IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7453{
7454 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7455 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7456 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7457 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7458 iemFpuMaybePopOne(pFpuCtx);
7459}
7460
7461
7462/**
7463 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7464 *
7465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7466 * @param u16FSW The FSW from the current instruction.
7467 * @param iEffSeg The effective memory operand selector register.
7468 * @param GCPtrEff The effective memory operand offset.
7469 */
7470IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7471{
7472 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7473 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7474 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7475 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7476 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7477}
7478
7479
7480/**
7481 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7482 *
7483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7484 * @param u16FSW The FSW from the current instruction.
7485 */
7486IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7487{
7488 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7489 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7490 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7491 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7492 iemFpuMaybePopOne(pFpuCtx);
7493 iemFpuMaybePopOne(pFpuCtx);
7494}
7495
7496
7497/**
7498 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7499 *
7500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7501 * @param u16FSW The FSW from the current instruction.
7502 * @param iEffSeg The effective memory operand selector register.
7503 * @param GCPtrEff The effective memory operand offset.
7504 */
7505IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7506{
7507 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7508 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7509 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7510 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7511 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7512 iemFpuMaybePopOne(pFpuCtx);
7513}
7514
7515
7516/**
7517 * Worker routine for raising an FPU stack underflow exception.
7518 *
7519 * @param pFpuCtx The FPU context.
7520 * @param iStReg The stack register being accessed.
7521 */
7522IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7523{
7524 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7525 if (pFpuCtx->FCW & X86_FCW_IM)
7526 {
7527 /* Masked underflow. */
7528 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7529 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7530 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7531 if (iStReg != UINT8_MAX)
7532 {
7533 pFpuCtx->FTW |= RT_BIT(iReg);
7534 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7535 }
7536 }
7537 else
7538 {
7539 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7540 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7541 }
7542}
7543
7544
7545/**
7546 * Raises a FPU stack underflow exception.
7547 *
7548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7549 * @param iStReg The destination register that should be loaded
7550 * with QNaN if \#IS is masked. Specify
7551 * UINT8_MAX if none (like for fcom).
7552 */
7553DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7554{
7555 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7556 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7557 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7558 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7559}
7560
7561
7562DECL_NO_INLINE(IEM_STATIC, void)
7563iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7564{
7565 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7566 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7567 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7568 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7569 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7570}
7571
7572
7573DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7574{
7575 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7576 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7577 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7578 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7579 iemFpuMaybePopOne(pFpuCtx);
7580}
7581
7582
7583DECL_NO_INLINE(IEM_STATIC, void)
7584iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7585{
7586 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7587 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7588 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7589 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7590 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7591 iemFpuMaybePopOne(pFpuCtx);
7592}
7593
7594
7595DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7596{
7597 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7598 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7599 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7600 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7601 iemFpuMaybePopOne(pFpuCtx);
7602 iemFpuMaybePopOne(pFpuCtx);
7603}
7604
7605
7606DECL_NO_INLINE(IEM_STATIC, void)
7607iemFpuStackPushUnderflow(PVMCPU pVCpu)
7608{
7609 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7610 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7611 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7612
7613 if (pFpuCtx->FCW & X86_FCW_IM)
7614 {
7615 /* Masked underflow - Push QNaN. */
7616 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7617 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7618 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7619 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7620 pFpuCtx->FTW |= RT_BIT(iNewTop);
7621 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7622 iemFpuRotateStackPush(pFpuCtx);
7623 }
7624 else
7625 {
7626 /* Exception pending - don't change TOP or the register stack. */
7627 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7628 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7629 }
7630}
7631
7632
7633DECL_NO_INLINE(IEM_STATIC, void)
7634iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7635{
7636 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7637 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7638 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7639
7640 if (pFpuCtx->FCW & X86_FCW_IM)
7641 {
7642 /* Masked underflow - Push QNaN. */
7643 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7644 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7645 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7646 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7647 pFpuCtx->FTW |= RT_BIT(iNewTop);
7648 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7649 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7650 iemFpuRotateStackPush(pFpuCtx);
7651 }
7652 else
7653 {
7654 /* Exception pending - don't change TOP or the register stack. */
7655 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7656 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7657 }
7658}
7659
7660
7661/**
7662 * Worker routine for raising an FPU stack overflow exception on a push.
7663 *
7664 * @param pFpuCtx The FPU context.
7665 */
7666IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7667{
7668 if (pFpuCtx->FCW & X86_FCW_IM)
7669 {
7670 /* Masked overflow. */
7671 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7672 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7673 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7674 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7675 pFpuCtx->FTW |= RT_BIT(iNewTop);
7676 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7677 iemFpuRotateStackPush(pFpuCtx);
7678 }
7679 else
7680 {
7681 /* Exception pending - don't change TOP or the register stack. */
7682 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7683 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7684 }
7685}
7686
7687
7688/**
7689 * Raises a FPU stack overflow exception on a push.
7690 *
7691 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7692 */
7693DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7694{
7695 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7696 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7697 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7698 iemFpuStackPushOverflowOnly(pFpuCtx);
7699}
7700
7701
7702/**
7703 * Raises a FPU stack overflow exception on a push with a memory operand.
7704 *
7705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7706 * @param iEffSeg The effective memory operand selector register.
7707 * @param GCPtrEff The effective memory operand offset.
7708 */
7709DECL_NO_INLINE(IEM_STATIC, void)
7710iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7711{
7712 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
7713 PX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
7714 iemFpuUpdateDP(pVCpu, pCtx, pFpuCtx, iEffSeg, GCPtrEff);
7715 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pCtx, pFpuCtx);
7716 iemFpuStackPushOverflowOnly(pFpuCtx);
7717}
7718
7719
7720IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7721{
7722 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7723 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7724 if (pFpuCtx->FTW & RT_BIT(iReg))
7725 return VINF_SUCCESS;
7726 return VERR_NOT_FOUND;
7727}
7728
7729
7730IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7731{
7732 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7733 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7734 if (pFpuCtx->FTW & RT_BIT(iReg))
7735 {
7736 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7737 return VINF_SUCCESS;
7738 }
7739 return VERR_NOT_FOUND;
7740}
7741
7742
7743IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7744 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7745{
7746 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7747 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7748 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7749 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7750 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7751 {
7752 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7753 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7754 return VINF_SUCCESS;
7755 }
7756 return VERR_NOT_FOUND;
7757}
7758
7759
7760IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7761{
7762 PX86FXSTATE pFpuCtx = &IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87;
7763 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7764 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7765 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7766 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7767 {
7768 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7769 return VINF_SUCCESS;
7770 }
7771 return VERR_NOT_FOUND;
7772}
7773
7774
7775/**
7776 * Updates the FPU exception status after FCW is changed.
7777 *
7778 * @param pFpuCtx The FPU context.
7779 */
7780IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7781{
7782 uint16_t u16Fsw = pFpuCtx->FSW;
7783 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7784 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7785 else
7786 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7787 pFpuCtx->FSW = u16Fsw;
7788}
7789
7790
7791/**
7792 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7793 *
7794 * @returns The full FTW.
7795 * @param pFpuCtx The FPU context.
7796 */
7797IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7798{
7799 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7800 uint16_t u16Ftw = 0;
7801 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
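 /* Full tag encoding per register: 00 = valid, 01 = zero, 10 = special (NaN, infinity,
    denormal, unsupported), 11 = empty. */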
7802 for (unsigned iSt = 0; iSt < 8; iSt++)
7803 {
7804 unsigned const iReg = (iSt + iTop) & 7;
7805 if (!(u8Ftw & RT_BIT(iReg)))
7806 u16Ftw |= 3 << (iReg * 2); /* empty */
7807 else
7808 {
7809 uint16_t uTag;
7810 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7811 if (pr80Reg->s.uExponent == 0x7fff)
7812 uTag = 2; /* Exponent is all 1's => Special. */
7813 else if (pr80Reg->s.uExponent == 0x0000)
7814 {
7815 if (pr80Reg->s.u64Mantissa == 0x0000)
7816 uTag = 1; /* All bits are zero => Zero. */
7817 else
7818 uTag = 2; /* Must be special. */
7819 }
7820 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7821 uTag = 0; /* Valid. */
7822 else
7823 uTag = 2; /* Must be special. */
7824
7825 u16Ftw |= uTag << (iReg * 2); /* store the 2-bit tag */
7826 }
7827 }
7828
7829 return u16Ftw;
7830}
7831
7832
7833/**
7834 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7835 *
7836 * @returns The compressed FTW.
7837 * @param u16FullFtw The full FTW to convert.
7838 */
7839IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7840{
7841 uint8_t u8Ftw = 0;
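 /* The compressed form keeps one bit per register: 1 = occupied (any tag other than
    11b/empty), 0 = empty. */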
7842 for (unsigned i = 0; i < 8; i++)
7843 {
7844 if ((u16FullFtw & 3) != 3 /*empty*/)
7845 u8Ftw |= RT_BIT(i);
7846 u16FullFtw >>= 2;
7847 }
7848
7849 return u8Ftw;
7850}
7851
7852/** @} */
7853
7854
7855/** @name Memory access.
7856 *
7857 * @{
7858 */
7859
7860
7861/**
7862 * Updates the IEMCPU::cbWritten counter if applicable.
7863 *
7864 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7865 * @param fAccess The access being accounted for.
7866 * @param cbMem The access size.
7867 */
7868DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7869{
7870 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7871 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7872 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7873}
7874
7875
7876/**
7877 * Checks if the given segment can be written to, raising the appropriate
7878 * exception if not.
7879 *
7880 * @returns VBox strict status code.
7881 *
7882 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7883 * @param pHid Pointer to the hidden register.
7884 * @param iSegReg The register number.
7885 * @param pu64BaseAddr Where to return the base address to use for the
7886 * segment. (In 64-bit code it may differ from the
7887 * base in the hidden segment.)
7888 */
7889IEM_STATIC VBOXSTRICTRC
7890iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7891{
7892 IEM_CTX_ASSERT(IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7893
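 /* In 64-bit mode the ES, CS, SS and DS bases are treated as zero; only FS and GS contribute
    a base address. */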
7894 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7895 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7896 else
7897 {
7898 if (!pHid->Attr.n.u1Present)
7899 {
7900 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7901 AssertRelease(uSel == 0);
7902 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7903 return iemRaiseGeneralProtectionFault0(pVCpu);
7904 }
7905
7906 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7907 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7908 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7909 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7910 *pu64BaseAddr = pHid->u64Base;
7911 }
7912 return VINF_SUCCESS;
7913}
7914
7915
7916/**
7917 * Checks if the given segment can be read from, raising the appropriate
7918 * exception if not.
7919 *
7920 * @returns VBox strict status code.
7921 *
7922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7923 * @param pHid Pointer to the hidden register.
7924 * @param iSegReg The register number.
7925 * @param pu64BaseAddr Where to return the base address to use for the
7926 * segment. (In 64-bit code it may differ from the
7927 * base in the hidden segment.)
7928 */
7929IEM_STATIC VBOXSTRICTRC
7930iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7931{
7932 IEM_CTX_ASSERT(IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7933
7934 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7935 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7936 else
7937 {
7938 if (!pHid->Attr.n.u1Present)
7939 {
7940 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7941 AssertRelease(uSel == 0);
7942 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7943 return iemRaiseGeneralProtectionFault0(pVCpu);
7944 }
7945
7946 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7947 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7948 *pu64BaseAddr = pHid->u64Base;
7949 }
7950 return VINF_SUCCESS;
7951}
7952
7953
7954/**
7955 * Applies the segment limit, base and attributes.
7956 *
7957 * This may raise a \#GP or \#SS.
7958 *
7959 * @returns VBox strict status code.
7960 *
7961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7962 * @param fAccess The kind of access which is being performed.
7963 * @param iSegReg The index of the segment register to apply.
7964 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7965 * TSS, ++).
7966 * @param cbMem The access size.
7967 * @param pGCPtrMem Pointer to the guest memory address to apply
7968 * segmentation to. Input and output parameter.
7969 */
7970IEM_STATIC VBOXSTRICTRC
7971iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7972{
7973 if (iSegReg == UINT8_MAX)
7974 return VINF_SUCCESS;
7975
7976 IEM_CTX_IMPORT_RET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7977 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7978 switch (pVCpu->iem.s.enmCpuMode)
7979 {
7980 case IEMMODE_16BIT:
7981 case IEMMODE_32BIT:
7982 {
7983 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7984 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7985
7986 if ( pSel->Attr.n.u1Present
7987 && !pSel->Attr.n.u1Unusable)
7988 {
7989 Assert(pSel->Attr.n.u1DescType);
7990 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
7991 {
7992 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
7993 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7994 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
7995
7996 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
7997 {
7998 /** @todo CPL check. */
7999 }
8000
8001 /*
8002 * There are two kinds of data selectors, normal and expand down.
8003 */
8004 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8005 {
8006 if ( GCPtrFirst32 > pSel->u32Limit
8007 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8008 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8009 }
8010 else
8011 {
8012 /*
8013 * The upper boundary is defined by the B bit, not the G bit!
8014 */
8015 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8016 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8017 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8018 }
8019 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8020 }
8021 else
8022 {
8023
8024 /*
8025 * A code selector can usually be used to read through; writing is
8026 * only permitted in real and V8086 mode.
8027 */
8028 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8029 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8030 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8031 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8032 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8033
8034 if ( GCPtrFirst32 > pSel->u32Limit
8035 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8036 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8037
8038 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8039 {
8040 /** @todo CPL check. */
8041 }
8042
8043 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8044 }
8045 }
8046 else
8047 return iemRaiseGeneralProtectionFault0(pVCpu);
8048 return VINF_SUCCESS;
8049 }
8050
8051 case IEMMODE_64BIT:
8052 {
8053 RTGCPTR GCPtrMem = *pGCPtrMem;
8054 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8055 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8056
8057 Assert(cbMem >= 1);
8058 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8059 return VINF_SUCCESS;
8060 return iemRaiseGeneralProtectionFault0(pVCpu);
8061 }
8062
8063 default:
8064 AssertFailedReturn(VERR_IEM_IPE_7);
8065 }
8066}
8067
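/*
 * Illustrative sketch (not part of the build): the expand-up limit check above,
 * reduced to a standalone helper with made-up names and numbers; the real check
 * is of course the one done by iemMemApplySegment itself.
 *
 * @code
 *  static bool iemExampleExpandUpAccessOk(uint32_t offFirst, uint32_t cb, uint32_t uLimit)
 *  {
 *      uint32_t const offLast = offFirst + cb - 1;
 *      return offFirst <= uLimit && offLast <= uLimit;
 *  }
 *  // iemExampleExpandUpAccessOk(0xfffc, 4, 0xffff) -> true  (linear addr = base + 0xfffc)
 *  // iemExampleExpandUpAccessOk(0xfffe, 4, 0xffff) -> false (iemRaiseSelectorBounds -> #GP/#SS)
 * @endcode
 */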
8068
8069/**
8070 * Translates a virtual address to a physical address and checks if we
8071 * can access the page as specified.
8072 *
8073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8074 * @param GCPtrMem The virtual address.
8075 * @param fAccess The intended access.
8076 * @param pGCPhysMem Where to return the physical address.
8077 */
8078IEM_STATIC VBOXSTRICTRC
8079iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8080{
8081 /** @todo Need a different PGM interface here. We're currently using
8082 * generic / REM interfaces. this won't cut it for R0 & RC. */
8083 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8084 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8085 RTGCPHYS GCPhys;
8086 uint64_t fFlags;
8087 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8088 if (RT_FAILURE(rc))
8089 {
8090 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8091 /** @todo Check unassigned memory in unpaged mode. */
8092 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8093 *pGCPhysMem = NIL_RTGCPHYS;
8094 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8095 }
8096
8097 /* If the page is writable, user accessible and does not have the no-exec
8098 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
8099 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8100 {
8101 /* Write to read only memory? */
8102 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8103 && !(fFlags & X86_PTE_RW)
8104 && ( (pVCpu->iem.s.uCpl == 3
8105 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8106 || (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_WP)))
8107 {
8108 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8109 *pGCPhysMem = NIL_RTGCPHYS;
8110 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8111 }
8112
8113 /* Kernel memory accessed by userland? */
8114 if ( !(fFlags & X86_PTE_US)
8115 && pVCpu->iem.s.uCpl == 3
8116 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8117 {
8118 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8119 *pGCPhysMem = NIL_RTGCPHYS;
8120 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8121 }
8122
8123 /* Executing non-executable memory? */
8124 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8125 && (fFlags & X86_PTE_PAE_NX)
8126 && (IEM_GET_CTX(pVCpu)->msrEFER & MSR_K6_EFER_NXE) )
8127 {
8128 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8129 *pGCPhysMem = NIL_RTGCPHYS;
8130 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8131 VERR_ACCESS_DENIED);
8132 }
8133 }
8134
8135 /*
8136 * Set the dirty / access flags.
8137 * ASSUMES this is set when the address is translated rather than on commit...
8138 */
8139 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8140 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8141 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8142 {
8143 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8144 AssertRC(rc2);
8145 }
8146
8147 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8148 *pGCPhysMem = GCPhys;
8149 return VINF_SUCCESS;
8150}
8151
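/*
 * Illustrative note (not part of the build): the fast path above only accepts a
 * PTE that is writable, user accessible and not marked no-execute, i.e.
 *
 * @code
 *  bool const fFastPath = (fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX))
 *                      ==           (X86_PTE_RW | X86_PTE_US);
 * @endcode
 *
 * Anything else (read-only, supervisor-only or NX pages) drops into the slow path
 * where CPL, CR0.WP and EFER.NXE decide whether a \#PF is raised.
 */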
8152
8153
8154/**
8155 * Maps a physical page.
8156 *
8157 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8159 * @param GCPhysMem The physical address.
8160 * @param fAccess The intended access.
8161 * @param ppvMem Where to return the mapping address.
8162 * @param pLock The PGM lock.
8163 */
8164IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8165{
8166#ifdef IEM_LOG_MEMORY_WRITES
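 /* Note: returning VERR_PGM_PHYS_TLB_CATCH_ALL here pushes writes onto the bounce
 buffer path (see the iemMemBounceBufferMapPhys fallback in iemMemMap), where
 iemMemBounceBufferCommitAndUnmap will log them. */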
8167 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8168 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8169#endif
8170
8171 /** @todo This API may require some improving later. A private deal with PGM
8172 * regarding locking and unlocking needs to be struck. A couple of TLBs
8173 * living in PGM, but with publicly accessible inlined access methods
8174 * could perhaps be an even better solution. */
8175 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8176 GCPhysMem,
8177 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8178 pVCpu->iem.s.fBypassHandlers,
8179 ppvMem,
8180 pLock);
8181 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8182 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8183
8184 return rc;
8185}
8186
8187
8188/**
8189 * Unmaps a page previously mapped by iemMemPageMap.
8190 *
8191 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8192 * @param GCPhysMem The physical address.
8193 * @param fAccess The intended access.
8194 * @param pvMem What iemMemPageMap returned.
8195 * @param pLock The PGM lock.
8196 */
8197DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8198{
8199 NOREF(pVCpu);
8200 NOREF(GCPhysMem);
8201 NOREF(fAccess);
8202 NOREF(pvMem);
8203 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8204}
8205
8206
8207/**
8208 * Looks up a memory mapping entry.
8209 *
8210 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8212 * @param pvMem The memory address.
8213 * @param fAccess The type of access to look up.
8214 */
8215DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8216{
8217 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8218 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8219 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8220 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8221 return 0;
8222 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8223 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8224 return 1;
8225 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8226 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8227 return 2;
8228 return VERR_NOT_FOUND;
8229}
8230
8231
8232/**
8233 * Finds a free memmap entry when using iNextMapping doesn't work.
8234 *
8235 * @returns Memory mapping index, 1024 on failure.
8236 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8237 */
8238IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8239{
8240 /*
8241 * The easy case.
8242 */
8243 if (pVCpu->iem.s.cActiveMappings == 0)
8244 {
8245 pVCpu->iem.s.iNextMapping = 1;
8246 return 0;
8247 }
8248
8249 /* There should be enough mappings for all instructions. */
8250 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8251
8252 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8253 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8254 return i;
8255
8256 AssertFailedReturn(1024);
8257}
8258
8259
8260/**
8261 * Commits a bounce buffer that needs writing back and unmaps it.
8262 *
8263 * @returns Strict VBox status code.
8264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8265 * @param iMemMap The index of the buffer to commit.
8266 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8267 * Always false in ring-3, obviously.
8268 */
8269IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8270{
8271 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8272 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8273#ifdef IN_RING3
8274 Assert(!fPostponeFail);
8275 RT_NOREF_PV(fPostponeFail);
8276#endif
8277
8278 /*
8279 * Do the writing.
8280 */
8281 PVM pVM = pVCpu->CTX_SUFF(pVM);
8282 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8283 {
8284 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8285 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8286 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8287 if (!pVCpu->iem.s.fBypassHandlers)
8288 {
8289 /*
8290 * Carefully and efficiently dealing with access handler return
8291 * codes make this a little bloated.
8292 */
8293 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8294 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8295 pbBuf,
8296 cbFirst,
8297 PGMACCESSORIGIN_IEM);
8298 if (rcStrict == VINF_SUCCESS)
8299 {
8300 if (cbSecond)
8301 {
8302 rcStrict = PGMPhysWrite(pVM,
8303 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8304 pbBuf + cbFirst,
8305 cbSecond,
8306 PGMACCESSORIGIN_IEM);
8307 if (rcStrict == VINF_SUCCESS)
8308 { /* nothing */ }
8309 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8310 {
8311 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8312 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8313 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8314 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8315 }
8316#ifndef IN_RING3
8317 else if (fPostponeFail)
8318 {
8319 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8320 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8321 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8322 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8323 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8324 return iemSetPassUpStatus(pVCpu, rcStrict);
8325 }
8326#endif
8327 else
8328 {
8329 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8330 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8331 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8332 return rcStrict;
8333 }
8334 }
8335 }
8336 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8337 {
8338 if (!cbSecond)
8339 {
8340 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8341 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8342 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8343 }
8344 else
8345 {
8346 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8347 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8348 pbBuf + cbFirst,
8349 cbSecond,
8350 PGMACCESSORIGIN_IEM);
8351 if (rcStrict2 == VINF_SUCCESS)
8352 {
8353 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8354 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8355 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8356 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8357 }
8358 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8359 {
8360 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8361 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8362 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8363 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8364 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8365 }
8366#ifndef IN_RING3
8367 else if (fPostponeFail)
8368 {
8369 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8370 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8371 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8372 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8373 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8374 return iemSetPassUpStatus(pVCpu, rcStrict);
8375 }
8376#endif
8377 else
8378 {
8379 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8380 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8381 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8382 return rcStrict2;
8383 }
8384 }
8385 }
8386#ifndef IN_RING3
8387 else if (fPostponeFail)
8388 {
8389 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8390 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8392 if (!cbSecond)
8393 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8394 else
8395 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8396 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8397 return iemSetPassUpStatus(pVCpu, rcStrict);
8398 }
8399#endif
8400 else
8401 {
8402 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8403 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8404 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8405 return rcStrict;
8406 }
8407 }
8408 else
8409 {
8410 /*
8411 * No access handlers, much simpler.
8412 */
8413 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8414 if (RT_SUCCESS(rc))
8415 {
8416 if (cbSecond)
8417 {
8418 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8419 if (RT_SUCCESS(rc))
8420 { /* likely */ }
8421 else
8422 {
8423 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8425 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8426 return rc;
8427 }
8428 }
8429 }
8430 else
8431 {
8432 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8433 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8435 return rc;
8436 }
8437 }
8438 }
8439
8440#if defined(IEM_LOG_MEMORY_WRITES)
8441 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8442 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8443 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8444 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8445 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8446 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8447
8448 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8449 g_cbIemWrote = cbWrote;
8450 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8451#endif
8452
8453 /*
8454 * Free the mapping entry.
8455 */
8456 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8457 Assert(pVCpu->iem.s.cActiveMappings != 0);
8458 pVCpu->iem.s.cActiveMappings--;
8459 return VINF_SUCCESS;
8460}
8461
8462
8463/**
8464 * iemMemMap worker that deals with a request crossing pages.
8465 */
8466IEM_STATIC VBOXSTRICTRC
8467iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8468{
8469 /*
8470 * Do the address translations.
8471 */
8472 RTGCPHYS GCPhysFirst;
8473 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8474 if (rcStrict != VINF_SUCCESS)
8475 return rcStrict;
8476
8477 RTGCPHYS GCPhysSecond;
8478 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8479 fAccess, &GCPhysSecond);
8480 if (rcStrict != VINF_SUCCESS)
8481 return rcStrict;
8482 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8483
8484 PVM pVM = pVCpu->CTX_SUFF(pVM);
8485
8486 /*
8487 * Read in the current memory content if it's a read, execute or partial
8488 * write access.
8489 */
8490 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8491 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8492 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8493
8494 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8495 {
8496 if (!pVCpu->iem.s.fBypassHandlers)
8497 {
8498 /*
8499 * Must carefully deal with access handler status codes here,
8500 * makes the code a bit bloated.
8501 */
8502 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8503 if (rcStrict == VINF_SUCCESS)
8504 {
8505 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8506 if (rcStrict == VINF_SUCCESS)
8507 { /*likely */ }
8508 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8509 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8510 else
8511 {
8512 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8513 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8514 return rcStrict;
8515 }
8516 }
8517 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8518 {
8519 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8520 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8521 {
8522 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8523 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8524 }
8525 else
8526 {
8527 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8528 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8529 return rcStrict2;
8530 }
8531 }
8532 else
8533 {
8534 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8535 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8536 return rcStrict;
8537 }
8538 }
8539 else
8540 {
8541 /*
8542 * No informational status codes here, much more straightforward.
8543 */
8544 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8545 if (RT_SUCCESS(rc))
8546 {
8547 Assert(rc == VINF_SUCCESS);
8548 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8549 if (RT_SUCCESS(rc))
8550 Assert(rc == VINF_SUCCESS);
8551 else
8552 {
8553 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8554 return rc;
8555 }
8556 }
8557 else
8558 {
8559 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8560 return rc;
8561 }
8562 }
8563 }
8564#ifdef VBOX_STRICT
8565 else
8566 memset(pbBuf, 0xcc, cbMem);
8567 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8568 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8569#endif
8570
8571 /*
8572 * Commit the bounce buffer entry.
8573 */
8574 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8575 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8576 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8577 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8578 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8579 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8580 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8581 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8582 pVCpu->iem.s.cActiveMappings++;
8583
8584 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8585 *ppvMem = pbBuf;
8586 return VINF_SUCCESS;
8587}
8588
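/*
 * Illustrative note (not part of the build): how the code above splits a
 * cross-page access; the numbers are assumptions picked for the example.
 *
 *      8 byte read at guest linear 0xf000ffa (4 KB pages):
 *          cbFirstPage  = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK) = 0x1000 - 0xffa = 6
 *          cbSecondPage = cbMem - cbFirstPage                          = 8 - 6          = 2
 *      The first 6 bytes come from GCPhysFirst, the last 2 from GCPhysSecond, and
 *      both halves end up contiguously in the bounce buffer returned via *ppvMem.
 */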
8589
8590/**
8591 * iemMemMap worker that deals with iemMemPageMap failures.
8592 */
8593IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8594 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8595{
8596 /*
8597 * Filter out conditions we can handle and the ones which shouldn't happen.
8598 */
8599 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8600 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8601 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8602 {
8603 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8604 return rcMap;
8605 }
8606 pVCpu->iem.s.cPotentialExits++;
8607
8608 /*
8609 * Read in the current memory content if it's a read, execute or partial
8610 * write access.
8611 */
8612 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8613 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8614 {
8615 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8616 memset(pbBuf, 0xff, cbMem);
8617 else
8618 {
8619 int rc;
8620 if (!pVCpu->iem.s.fBypassHandlers)
8621 {
8622 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8623 if (rcStrict == VINF_SUCCESS)
8624 { /* nothing */ }
8625 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8626 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8627 else
8628 {
8629 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8630 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8631 return rcStrict;
8632 }
8633 }
8634 else
8635 {
8636 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8637 if (RT_SUCCESS(rc))
8638 { /* likely */ }
8639 else
8640 {
8641 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8642 GCPhysFirst, rc));
8643 return rc;
8644 }
8645 }
8646 }
8647 }
8648#ifdef VBOX_STRICT
8649 else
8650 memset(pbBuf, 0xcc, cbMem);
8653 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8654 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8655#endif
8656
8657 /*
8658 * Commit the bounce buffer entry.
8659 */
8660 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8661 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8662 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8663 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8664 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8665 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8666 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8667 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8668 pVCpu->iem.s.cActiveMappings++;
8669
8670 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8671 *ppvMem = pbBuf;
8672 return VINF_SUCCESS;
8673}
8674
8675
8676
8677/**
8678 * Maps the specified guest memory for the given kind of access.
8679 *
8680 * This may be using bounce buffering of the memory if it's crossing a page
8681 * boundary or if there is an access handler installed for any of it. Because
8682 * of lock prefix guarantees, we're in for some extra clutter when this
8683 * happens.
8684 *
8685 * This may raise a \#GP, \#SS, \#PF or \#AC.
8686 *
8687 * @returns VBox strict status code.
8688 *
8689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8690 * @param ppvMem Where to return the pointer to the mapped
8691 * memory.
8692 * @param cbMem The number of bytes to map. This is usually 1,
8693 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8694 * string operations it can be up to a page.
8695 * @param iSegReg The index of the segment register to use for
8696 * this access. The base and limits are checked.
8697 * Use UINT8_MAX to indicate that no segmentation
8698 * is required (for IDT, GDT and LDT accesses).
8699 * @param GCPtrMem The address of the guest memory.
8700 * @param fAccess How the memory is being accessed. The
8701 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8702 * how to map the memory, while the
8703 * IEM_ACCESS_WHAT_XXX bit is used when raising
8704 * exceptions.
8705 */
8706IEM_STATIC VBOXSTRICTRC
8707iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8708{
8709 /*
8710 * Check the input and figure out which mapping entry to use.
8711 */
8712 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8713 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8714 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8715
8716 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8717 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8718 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8719 {
8720 iMemMap = iemMemMapFindFree(pVCpu);
8721 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8722 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8723 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8724 pVCpu->iem.s.aMemMappings[2].fAccess),
8725 VERR_IEM_IPE_9);
8726 }
8727
8728 /*
8729 * Map the memory, checking that we can actually access it. If something
8730 * slightly complicated happens, fall back on bounce buffering.
8731 */
8732 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8733 if (rcStrict != VINF_SUCCESS)
8734 return rcStrict;
8735
8736 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8737 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8738
8739 RTGCPHYS GCPhysFirst;
8740 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8741 if (rcStrict != VINF_SUCCESS)
8742 return rcStrict;
8743
8744 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8745 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8746 if (fAccess & IEM_ACCESS_TYPE_READ)
8747 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8748
8749 void *pvMem;
8750 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8751 if (rcStrict != VINF_SUCCESS)
8752 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8753
8754 /*
8755 * Fill in the mapping table entry.
8756 */
8757 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8758 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8759 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8760 pVCpu->iem.s.cActiveMappings++;
8761
8762 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8763 *ppvMem = pvMem;
8764 return VINF_SUCCESS;
8765}
8766
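/*
 * Illustrative usage sketch (not part of the build): the typical map / modify /
 * commit sequence for a data write, mirroring the read helpers further down.
 * The variable names are made up for the example.
 *
 * @code
 *  uint32_t *pu32Dst;
 *  VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                    X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
 *  if (rcStrict == VINF_SUCCESS)
 *  {
 *      *pu32Dst = u32Value;
 *      rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *  }
 *  return rcStrict;
 * @endcode
 */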
8767
8768/**
8769 * Commits the guest memory if bounce buffered and unmaps it.
8770 *
8771 * @returns Strict VBox status code.
8772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8773 * @param pvMem The mapping.
8774 * @param fAccess The kind of access.
8775 */
8776IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8777{
8778 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8779 AssertReturn(iMemMap >= 0, iMemMap);
8780
8781 /* If it's bounce buffered, we may need to write back the buffer. */
8782 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8783 {
8784 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8785 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8786 }
8787 /* Otherwise unlock it. */
8788 else
8789 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8790
8791 /* Free the entry. */
8792 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8793 Assert(pVCpu->iem.s.cActiveMappings != 0);
8794 pVCpu->iem.s.cActiveMappings--;
8795 return VINF_SUCCESS;
8796}
8797
8798#ifdef IEM_WITH_SETJMP
8799
8800/**
8801 * Maps the specified guest memory for the given kind of access, longjmp on
8802 * error.
8803 *
8804 * This may be using bounce buffering of the memory if it's crossing a page
8805 * boundary or if there is an access handler installed for any of it. Because
8806 * of lock prefix guarantees, we're in for some extra clutter when this
8807 * happens.
8808 *
8809 * This may raise a \#GP, \#SS, \#PF or \#AC.
8810 *
8811 * @returns Pointer to the mapped memory.
8812 *
8813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8814 * @param cbMem The number of bytes to map. This is usually 1,
8815 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8816 * string operations it can be up to a page.
8817 * @param iSegReg The index of the segment register to use for
8818 * this access. The base and limits are checked.
8819 * Use UINT8_MAX to indicate that no segmentation
8820 * is required (for IDT, GDT and LDT accesses).
8821 * @param GCPtrMem The address of the guest memory.
8822 * @param fAccess How the memory is being accessed. The
8823 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8824 * how to map the memory, while the
8825 * IEM_ACCESS_WHAT_XXX bit is used when raising
8826 * exceptions.
8827 */
8828IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8829{
8830 /*
8831 * Check the input and figure out which mapping entry to use.
8832 */
8833 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8834 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8835 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8836
8837 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8838 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8839 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8840 {
8841 iMemMap = iemMemMapFindFree(pVCpu);
8842 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8843 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8844 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8845 pVCpu->iem.s.aMemMappings[2].fAccess),
8846 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8847 }
8848
8849 /*
8850 * Map the memory, checking that we can actually access it. If something
8851 * slightly complicated happens, fall back on bounce buffering.
8852 */
8853 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8854 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8855 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8856
8857 /* Crossing a page boundary? */
8858 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8859 { /* No (likely). */ }
8860 else
8861 {
8862 void *pvMem;
8863 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8864 if (rcStrict == VINF_SUCCESS)
8865 return pvMem;
8866 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8867 }
8868
8869 RTGCPHYS GCPhysFirst;
8870 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8871 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8872 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8873
8874 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8875 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8876 if (fAccess & IEM_ACCESS_TYPE_READ)
8877 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8878
8879 void *pvMem;
8880 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8881 if (rcStrict == VINF_SUCCESS)
8882 { /* likely */ }
8883 else
8884 {
8885 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8886 if (rcStrict == VINF_SUCCESS)
8887 return pvMem;
8888 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8889 }
8890
8891 /*
8892 * Fill in the mapping table entry.
8893 */
8894 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8895 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8896 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8897 pVCpu->iem.s.cActiveMappings++;
8898
8899 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8900 return pvMem;
8901}
8902
8903
8904/**
8905 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8906 *
8907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8908 * @param pvMem The mapping.
8909 * @param fAccess The kind of access.
8910 */
8911IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8912{
8913 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8914 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8915
8916 /* If it's bounce buffered, we may need to write back the buffer. */
8917 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8918 {
8919 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8920 {
8921 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8922 if (rcStrict == VINF_SUCCESS)
8923 return;
8924 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8925 }
8926 }
8927 /* Otherwise unlock it. */
8928 else
8929 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8930
8931 /* Free the entry. */
8932 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8933 Assert(pVCpu->iem.s.cActiveMappings != 0);
8934 pVCpu->iem.s.cActiveMappings--;
8935}
8936
8937#endif /* IEM_WITH_SETJMP */
8938
8939#ifndef IN_RING3
8940/**
8941 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8942 * buffer part shows trouble the write is postponed to ring-3 (sets VMCPU_FF_IEM etc).
8943 *
8944 * Allows the instruction to be completed and retired, while the IEM user will
8945 * return to ring-3 immediately afterwards and do the postponed writes there.
8946 *
8947 * @returns VBox status code (no strict statuses). Caller must check
8948 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8950 * @param pvMem The mapping.
8951 * @param fAccess The kind of access.
8952 */
8953IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8954{
8955 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8956 AssertReturn(iMemMap >= 0, iMemMap);
8957
8958 /* If it's bounce buffered, we may need to write back the buffer. */
8959 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8960 {
8961 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8962 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8963 }
8964 /* Otherwise unlock it. */
8965 else
8966 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8967
8968 /* Free the entry. */
8969 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8970 Assert(pVCpu->iem.s.cActiveMappings != 0);
8971 pVCpu->iem.s.cActiveMappings--;
8972 return VINF_SUCCESS;
8973}
8974#endif
8975
8976
8977/**
8978 * Rolls back mappings, releasing page locks and such.
8979 *
8980 * The caller shall only call this after checking cActiveMappings.
8981 *
8983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8984 */
8985IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
8986{
8987 Assert(pVCpu->iem.s.cActiveMappings > 0);
8988
8989 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
8990 while (iMemMap-- > 0)
8991 {
8992 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
8993 if (fAccess != IEM_ACCESS_INVALID)
8994 {
8995 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
8996 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8997 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
8998 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8999 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9000 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9001 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9002 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9003 pVCpu->iem.s.cActiveMappings--;
9004 }
9005 }
9006}
9007
9008
9009/**
9010 * Fetches a data byte.
9011 *
9012 * @returns Strict VBox status code.
9013 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9014 * @param pu8Dst Where to return the byte.
9015 * @param iSegReg The index of the segment register to use for
9016 * this access. The base and limits are checked.
9017 * @param GCPtrMem The address of the guest memory.
9018 */
9019IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9020{
9021 /* The lazy approach for now... */
9022 uint8_t const *pu8Src;
9023 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9024 if (rc == VINF_SUCCESS)
9025 {
9026 *pu8Dst = *pu8Src;
9027 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9028 }
9029 return rc;
9030}
9031
9032
9033#ifdef IEM_WITH_SETJMP
9034/**
9035 * Fetches a data byte, longjmp on error.
9036 *
9037 * @returns The byte.
9038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9039 * @param iSegReg The index of the segment register to use for
9040 * this access. The base and limits are checked.
9041 * @param GCPtrMem The address of the guest memory.
9042 */
9043DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9044{
9045 /* The lazy approach for now... */
9046 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9047 uint8_t const bRet = *pu8Src;
9048 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9049 return bRet;
9050}
9051#endif /* IEM_WITH_SETJMP */
9052
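/*
 * Illustrative sketch (not part of the build): the *Jmp fetchers above report
 * errors by longjmp'ing through IEMCPU::pJmpBuf instead of returning a status
 * code, so they run under a setjmp frame roughly like the one below (a
 * simplification of what the execution loop establishes).
 *
 * @code
 *  uint8_t            bValue       = 0;
 *  VBOXSTRICTRC       rcStrict     = VINF_SUCCESS;
 *  jmp_buf            JmpBuf;
 *  jmp_buf * volatile pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
 *  pVCpu->iem.s.CTX_SUFF(pJmpBuf)  = &JmpBuf;
 *  int const rcJmp = setjmp(JmpBuf);
 *  if (rcJmp == 0)
 *      bValue = iemMemFetchDataU8Jmp(pVCpu, X86_SREG_DS, GCPtrMem); // may longjmp
 *  else
 *      rcStrict = rcJmp;                    // the VBox status passed to longjmp
 *  pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
 * @endcode
 */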
9053
9054/**
9055 * Fetches a data word.
9056 *
9057 * @returns Strict VBox status code.
9058 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9059 * @param pu16Dst Where to return the word.
9060 * @param iSegReg The index of the segment register to use for
9061 * this access. The base and limits are checked.
9062 * @param GCPtrMem The address of the guest memory.
9063 */
9064IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9065{
9066 /* The lazy approach for now... */
9067 uint16_t const *pu16Src;
9068 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9069 if (rc == VINF_SUCCESS)
9070 {
9071 *pu16Dst = *pu16Src;
9072 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9073 }
9074 return rc;
9075}
9076
9077
9078#ifdef IEM_WITH_SETJMP
9079/**
9080 * Fetches a data word, longjmp on error.
9081 *
9082 * @returns The word
9083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9084 * @param iSegReg The index of the segment register to use for
9085 * this access. The base and limits are checked.
9086 * @param GCPtrMem The address of the guest memory.
9087 */
9088DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9089{
9090 /* The lazy approach for now... */
9091 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9092 uint16_t const u16Ret = *pu16Src;
9093 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9094 return u16Ret;
9095}
9096#endif
9097
9098
9099/**
9100 * Fetches a data dword.
9101 *
9102 * @returns Strict VBox status code.
9103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9104 * @param pu32Dst Where to return the dword.
9105 * @param iSegReg The index of the segment register to use for
9106 * this access. The base and limits are checked.
9107 * @param GCPtrMem The address of the guest memory.
9108 */
9109IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9110{
9111 /* The lazy approach for now... */
9112 uint32_t const *pu32Src;
9113 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9114 if (rc == VINF_SUCCESS)
9115 {
9116 *pu32Dst = *pu32Src;
9117 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9118 }
9119 return rc;
9120}
9121
9122
9123#ifdef IEM_WITH_SETJMP
9124
9125IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9126{
9127 Assert(cbMem >= 1);
9128 Assert(iSegReg < X86_SREG_COUNT);
9129
9130 /*
9131 * 64-bit mode is simpler.
9132 */
9133 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9134 {
9135 if (iSegReg >= X86_SREG_FS)
9136 {
9137 IEM_CTX_IMPORT_JMP(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9138 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9139 GCPtrMem += pSel->u64Base;
9140 }
9141
9142 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9143 return GCPtrMem;
9144 }
9145 /*
9146 * 16-bit and 32-bit segmentation.
9147 */
9148 else
9149 {
9150 IEM_CTX_IMPORT_JMP(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9151 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9152 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9153 == X86DESCATTR_P /* data, expand up */
9154 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9155 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9156 {
9157 /* expand up */
9158 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9159 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9160 && GCPtrLast32 > (uint32_t)GCPtrMem))
9161 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9162 }
9163 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9164 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9165 {
9166 /* expand down */
9167 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9168 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9169 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9170 && GCPtrLast32 > (uint32_t)GCPtrMem))
9171 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9172 }
9173 else
9174 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9175 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9176 }
9177 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9178}
9179
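/*
 * Illustrative note (not part of the build): the "GCPtrLast32 > (uint32_t)GCPtrMem"
 * clause above guards against 32-bit wrap-around of the end address.  For example
 * (numbers made up), a 4 byte read at GCPtrMem=0xfffffffe gives GCPtrLast32=0x2,
 * the clause fails, and the access ends up in iemRaiseSelectorBoundsJmp().
 */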
9180
9181IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9182{
9183 Assert(cbMem >= 1);
9184 Assert(iSegReg < X86_SREG_COUNT);
9185
9186 /*
9187 * 64-bit mode is simpler.
9188 */
9189 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9190 {
9191 if (iSegReg >= X86_SREG_FS)
9192 {
9193 IEM_CTX_IMPORT_JMP(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9194 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9195 GCPtrMem += pSel->u64Base;
9196 }
9197
9198 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9199 return GCPtrMem;
9200 }
9201 /*
9202 * 16-bit and 32-bit segmentation.
9203 */
9204 else
9205 {
9206 IEM_CTX_IMPORT_JMP(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9207 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9208 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9209 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9210 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9211 {
9212 /* expand up */
9213 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9214 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9215 && GCPtrLast32 > (uint32_t)GCPtrMem))
9216 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9217 }
9218 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9219 {
9220 /* expand down */
9221 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9222 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9223 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9224 && GCPtrLast32 > (uint32_t)GCPtrMem))
9225 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9226 }
9227 else
9228 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9229 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9230 }
9231 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9232}
9233
9234
9235/**
9236 * Fetches a data dword, longjmp on error, fallback/safe version.
9237 *
9238 * @returns The dword
9239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9240 * @param iSegReg The index of the segment register to use for
9241 * this access. The base and limits are checked.
9242 * @param GCPtrMem The address of the guest memory.
9243 */
9244IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9245{
9246 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9247 uint32_t const u32Ret = *pu32Src;
9248 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9249 return u32Ret;
9250}
9251
9252
9253/**
9254 * Fetches a data dword, longjmp on error.
9255 *
9256 * @returns The dword
9257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9258 * @param iSegReg The index of the segment register to use for
9259 * this access. The base and limits are checked.
9260 * @param GCPtrMem The address of the guest memory.
9261 */
9262DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9263{
9264# ifdef IEM_WITH_DATA_TLB
9265 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9266 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9267 {
9268 /// @todo more later.
9269 }
9270
9271 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9272# else
9273 /* The lazy approach. */
9274 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9275 uint32_t const u32Ret = *pu32Src;
9276 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9277 return u32Ret;
9278# endif
9279}
9280#endif
9281
9282
9283#ifdef SOME_UNUSED_FUNCTION
9284/**
9285 * Fetches a data dword and sign extends it to a qword.
9286 *
9287 * @returns Strict VBox status code.
9288 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9289 * @param pu64Dst Where to return the sign extended value.
9290 * @param iSegReg The index of the segment register to use for
9291 * this access. The base and limits are checked.
9292 * @param GCPtrMem The address of the guest memory.
9293 */
9294IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9295{
9296 /* The lazy approach for now... */
9297 int32_t const *pi32Src;
9298 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9299 if (rc == VINF_SUCCESS)
9300 {
9301 *pu64Dst = *pi32Src;
9302 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9303 }
9304#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9305 else
9306 *pu64Dst = 0;
9307#endif
9308 return rc;
9309}
9310#endif
9311
9312
9313/**
9314 * Fetches a data qword.
9315 *
9316 * @returns Strict VBox status code.
9317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9318 * @param pu64Dst Where to return the qword.
9319 * @param iSegReg The index of the segment register to use for
9320 * this access. The base and limits are checked.
9321 * @param GCPtrMem The address of the guest memory.
9322 */
9323IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9324{
9325 /* The lazy approach for now... */
9326 uint64_t const *pu64Src;
9327 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9328 if (rc == VINF_SUCCESS)
9329 {
9330 *pu64Dst = *pu64Src;
9331 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9332 }
9333 return rc;
9334}
9335
9336
9337#ifdef IEM_WITH_SETJMP
9338/**
9339 * Fetches a data qword, longjmp on error.
9340 *
9341 * @returns The qword.
9342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9343 * @param iSegReg The index of the segment register to use for
9344 * this access. The base and limits are checked.
9345 * @param GCPtrMem The address of the guest memory.
9346 */
9347DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9348{
9349 /* The lazy approach for now... */
9350 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9351 uint64_t const u64Ret = *pu64Src;
9352 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9353 return u64Ret;
9354}
9355#endif
9356
9357
9358/**
9359 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9360 *
9361 * @returns Strict VBox status code.
9362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9363 * @param pu64Dst Where to return the qword.
9364 * @param iSegReg The index of the segment register to use for
9365 * this access. The base and limits are checked.
9366 * @param GCPtrMem The address of the guest memory.
9367 */
9368IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9369{
9370 /* The lazy approach for now... */
9371 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9372 if (RT_UNLIKELY(GCPtrMem & 15))
9373 return iemRaiseGeneralProtectionFault0(pVCpu);
9374
9375 uint64_t const *pu64Src;
9376 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9377 if (rc == VINF_SUCCESS)
9378 {
9379 *pu64Dst = *pu64Src;
9380 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9381 }
9382 return rc;
9383}
9384
9385
9386#ifdef IEM_WITH_SETJMP
9387/**
9388 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9389 *
9390 * @returns The qword.
9391 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9392 * @param iSegReg The index of the segment register to use for
9393 * this access. The base and limits are checked.
9394 * @param GCPtrMem The address of the guest memory.
9395 */
9396DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9397{
9398 /* The lazy approach for now... */
9399 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9400 if (RT_LIKELY(!(GCPtrMem & 15)))
9401 {
9402 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9403 uint64_t const u64Ret = *pu64Src;
9404 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9405 return u64Ret;
9406 }
9407
9408 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9409 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9410}
9411#endif
9412
9413
9414/**
9415 * Fetches a data tword.
9416 *
9417 * @returns Strict VBox status code.
9418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9419 * @param pr80Dst Where to return the tword.
9420 * @param iSegReg The index of the segment register to use for
9421 * this access. The base and limits are checked.
9422 * @param GCPtrMem The address of the guest memory.
9423 */
9424IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9425{
9426 /* The lazy approach for now... */
9427 PCRTFLOAT80U pr80Src;
9428 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9429 if (rc == VINF_SUCCESS)
9430 {
9431 *pr80Dst = *pr80Src;
9432 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9433 }
9434 return rc;
9435}
9436
9437
9438#ifdef IEM_WITH_SETJMP
9439/**
9440 * Fetches a data tword, longjmp on error.
9441 *
9442 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9443 * @param pr80Dst Where to return the tword.
9444 * @param iSegReg The index of the segment register to use for
9445 * this access. The base and limits are checked.
9446 * @param GCPtrMem The address of the guest memory.
9447 */
9448DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9449{
9450 /* The lazy approach for now... */
9451 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9452 *pr80Dst = *pr80Src;
9453 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9454}
9455#endif
9456
9457
9458/**
9459 * Fetches a data dqword (double qword), generally SSE related.
9460 *
9461 * @returns Strict VBox status code.
9462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9463 * @param pu128Dst Where to return the dqword.
9464 * @param iSegReg The index of the segment register to use for
9465 * this access. The base and limits are checked.
9466 * @param GCPtrMem The address of the guest memory.
9467 */
9468IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9469{
9470 /* The lazy approach for now... */
9471 PCRTUINT128U pu128Src;
9472 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9473 if (rc == VINF_SUCCESS)
9474 {
9475 pu128Dst->au64[0] = pu128Src->au64[0];
9476 pu128Dst->au64[1] = pu128Src->au64[1];
9477 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9478 }
9479 return rc;
9480}
9481
9482
9483#ifdef IEM_WITH_SETJMP
9484/**
9485 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9486 *
9487 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9488 * @param pu128Dst Where to return the dqword.
9489 * @param iSegReg The index of the segment register to use for
9490 * this access. The base and limits are checked.
9491 * @param GCPtrMem The address of the guest memory.
9492 */
9493IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9494{
9495 /* The lazy approach for now... */
9496 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9497 pu128Dst->au64[0] = pu128Src->au64[0];
9498 pu128Dst->au64[1] = pu128Src->au64[1];
9499 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9500}
9501#endif
9502
9503
9504/**
9505 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9506 * related.
9507 *
9508 * Raises \#GP(0) if not aligned.
9509 *
9510 * @returns Strict VBox status code.
9511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9512 * @param pu128Dst Where to return the dqword.
9513 * @param iSegReg The index of the segment register to use for
9514 * this access. The base and limits are checked.
9515 * @param GCPtrMem The address of the guest memory.
9516 */
9517IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9518{
9519 /* The lazy approach for now... */
9520 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9521 if ( (GCPtrMem & 15)
9522 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9523 return iemRaiseGeneralProtectionFault0(pVCpu);
9524
9525 PCRTUINT128U pu128Src;
9526 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9527 if (rc == VINF_SUCCESS)
9528 {
9529 pu128Dst->au64[0] = pu128Src->au64[0];
9530 pu128Dst->au64[1] = pu128Src->au64[1];
9531 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9532 }
9533 return rc;
9534}
9535
9536
9537#ifdef IEM_WITH_SETJMP
9538/**
9539 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9540 * related, longjmp on error.
9541 *
9542 * Raises \#GP(0) if not aligned.
9543 *
9544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9545 * @param pu128Dst Where to return the dqword.
9546 * @param iSegReg The index of the segment register to use for
9547 * this access. The base and limits are checked.
9548 * @param GCPtrMem The address of the guest memory.
9549 */
9550DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9551{
9552 /* The lazy approach for now... */
9553 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9554 if ( (GCPtrMem & 15) == 0
9555 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9556 {
9557 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9558 pu128Dst->au64[0] = pu128Src->au64[0];
9559 pu128Dst->au64[1] = pu128Src->au64[1];
9560 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9561 return;
9562 }
9563
9564 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9565 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9566}
9567#endif
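/*
 * Note on the two aligned SSE fetchers above: a 16 byte access only faults when the
 * address is misaligned *and* MXCSR.MM (AMD's misaligned SSE mode, bit 17) is clear.
 * A minimal, hypothetical standalone sketch of that predicate, using plain C types
 * instead of the IEM context (MY_MXCSR_MM is a stand-in for X86_MXCSR_MM):
 *
 * @code
 *  #include <stdbool.h>
 *  #include <stdint.h>
 *
 *  #define MY_MXCSR_MM  UINT32_C(0x00020000)
 *
 *  static bool myWouldFaultSse16(uint64_t GCPtrMem, uint32_t fMxCsr)
 *  {
 *      // Fault only when misaligned and misaligned-SSE mode is not enabled.
 *      return (GCPtrMem & 15) != 0 && !(fMxCsr & MY_MXCSR_MM);
 *  }
 * @endcode
 */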
9568
9569
9570/**
9571 * Fetches a data oword (octo word), generally AVX related.
9572 *
9573 * @returns Strict VBox status code.
9574 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9575 * @param pu256Dst Where to return the qwords.
9576 * @param iSegReg The index of the segment register to use for
9577 * this access. The base and limits are checked.
9578 * @param GCPtrMem The address of the guest memory.
9579 */
9580IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9581{
9582 /* The lazy approach for now... */
9583 PCRTUINT256U pu256Src;
9584 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9585 if (rc == VINF_SUCCESS)
9586 {
9587 pu256Dst->au64[0] = pu256Src->au64[0];
9588 pu256Dst->au64[1] = pu256Src->au64[1];
9589 pu256Dst->au64[2] = pu256Src->au64[2];
9590 pu256Dst->au64[3] = pu256Src->au64[3];
9591 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9592 }
9593 return rc;
9594}
9595
9596
9597#ifdef IEM_WITH_SETJMP
9598/**
9599 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9600 *
9601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9602 * @param pu256Dst Where to return the qwords.
9603 * @param iSegReg The index of the segment register to use for
9604 * this access. The base and limits are checked.
9605 * @param GCPtrMem The address of the guest memory.
9606 */
9607IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9608{
9609 /* The lazy approach for now... */
9610 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9611 pu256Dst->au64[0] = pu256Src->au64[0];
9612 pu256Dst->au64[1] = pu256Src->au64[1];
9613 pu256Dst->au64[2] = pu256Src->au64[2];
9614 pu256Dst->au64[3] = pu256Src->au64[3];
9615 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9616}
9617#endif
9618
9619
9620/**
9621 * Fetches a data oword (octo word) at an aligned address, generally AVX
9622 * related.
9623 *
9624 * Raises \#GP(0) if not aligned.
9625 *
9626 * @returns Strict VBox status code.
9627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9628 * @param pu256Dst Where to return the qwords.
9629 * @param iSegReg The index of the segment register to use for
9630 * this access. The base and limits are checked.
9631 * @param GCPtrMem The address of the guest memory.
9632 */
9633IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9634{
9635 /* The lazy approach for now... */
9636 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9637 if (GCPtrMem & 31)
9638 return iemRaiseGeneralProtectionFault0(pVCpu);
9639
9640 PCRTUINT256U pu256Src;
9641 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9642 if (rc == VINF_SUCCESS)
9643 {
9644 pu256Dst->au64[0] = pu256Src->au64[0];
9645 pu256Dst->au64[1] = pu256Src->au64[1];
9646 pu256Dst->au64[2] = pu256Src->au64[2];
9647 pu256Dst->au64[3] = pu256Src->au64[3];
9648 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9649 }
9650 return rc;
9651}
9652
9653
9654#ifdef IEM_WITH_SETJMP
9655/**
9656 * Fetches a data oword (octo word) at an aligned address, generally AVX
9657 * related, longjmp on error.
9658 *
9659 * Raises \#GP(0) if not aligned.
9660 *
9661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9662 * @param pu256Dst Where to return the qwords.
9663 * @param iSegReg The index of the segment register to use for
9664 * this access. The base and limits are checked.
9665 * @param GCPtrMem The address of the guest memory.
9666 */
9667DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9668{
9669 /* The lazy approach for now... */
9670 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9671 if ((GCPtrMem & 31) == 0)
9672 {
9673 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9674 pu256Dst->au64[0] = pu256Src->au64[0];
9675 pu256Dst->au64[1] = pu256Src->au64[1];
9676 pu256Dst->au64[2] = pu256Src->au64[2];
9677 pu256Dst->au64[3] = pu256Src->au64[3];
9678 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9679 return;
9680 }
9681
9682 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9683 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9684}
9685#endif
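/*
 * Note: unlike the SSE fetchers further up, the 32 byte aligned AVX fetchers above
 * raise \#GP(0) on any misaligned address; there is no MXCSR.MM escape hatch in
 * this path.
 */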
9686
9687
9688
9689/**
9690 * Fetches a descriptor register (lgdt, lidt).
9691 *
9692 * @returns Strict VBox status code.
9693 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9694 * @param pcbLimit Where to return the limit.
9695 * @param pGCPtrBase Where to return the base.
9696 * @param iSegReg The index of the segment register to use for
9697 * this access. The base and limits are checked.
9698 * @param GCPtrMem The address of the guest memory.
9699 * @param enmOpSize The effective operand size.
9700 */
9701IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9702 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9703{
9704 /*
9705 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9706 * little special:
9707 * - The two reads are done separately.
9708 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9709 * - We suspect the 386 to actually commit the limit before the base in
9710 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9711 * don't try to emulate this eccentric behavior, because it's not well
9712 * enough understood and rather hard to trigger.
9713 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9714 */
9715 VBOXSTRICTRC rcStrict;
9716 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9717 {
9718 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9719 if (rcStrict == VINF_SUCCESS)
9720 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9721 }
9722 else
9723 {
9724 uint32_t uTmp = 0; /* (Silences a Visual C++ 'maybe used uninitialized' warning.) */
9725 if (enmOpSize == IEMMODE_32BIT)
9726 {
9727 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9728 {
9729 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9730 if (rcStrict == VINF_SUCCESS)
9731 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9732 }
9733 else
9734 {
9735 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9736 if (rcStrict == VINF_SUCCESS)
9737 {
9738 *pcbLimit = (uint16_t)uTmp;
9739 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9740 }
9741 }
9742 if (rcStrict == VINF_SUCCESS)
9743 *pGCPtrBase = uTmp;
9744 }
9745 else
9746 {
9747 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9748 if (rcStrict == VINF_SUCCESS)
9749 {
9750 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9751 if (rcStrict == VINF_SUCCESS)
9752 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9753 }
9754 }
9755 }
9756 return rcStrict;
9757}
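/*
 * For reference, the pseudo-descriptor read above is a 16-bit limit followed by the
 * base: only the low 24 bits of the base are used with a 16-bit operand size, the
 * full 32 bits with a 32-bit operand size, and a 64-bit base in long mode.  A
 * hypothetical packed-layout sketch of the 32-bit case, purely for illustration:
 *
 * @code
 *  #include <stdint.h>
 *  #pragma pack(1)
 *  typedef struct MYXDTR32
 *  {
 *      uint16_t cbLimit;   // bytes 0..1: the table limit
 *      uint32_t uBase;     // bytes 2..5: the linear base (low 24 bits with a 16-bit operand)
 *  } MYXDTR32;
 *  #pragma pack()
 * @endcode
 */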
9758
9759
9760
9761/**
9762 * Stores a data byte.
9763 *
9764 * @returns Strict VBox status code.
9765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9766 * @param iSegReg The index of the segment register to use for
9767 * this access. The base and limits are checked.
9768 * @param GCPtrMem The address of the guest memory.
9769 * @param u8Value The value to store.
9770 */
9771IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9772{
9773 /* The lazy approach for now... */
9774 uint8_t *pu8Dst;
9775 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9776 if (rc == VINF_SUCCESS)
9777 {
9778 *pu8Dst = u8Value;
9779 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9780 }
9781 return rc;
9782}
9783
9784
9785#ifdef IEM_WITH_SETJMP
9786/**
9787 * Stores a data byte, longjmp on error.
9788 *
9789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9790 * @param iSegReg The index of the segment register to use for
9791 * this access. The base and limits are checked.
9792 * @param GCPtrMem The address of the guest memory.
9793 * @param u8Value The value to store.
9794 */
9795IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9796{
9797 /* The lazy approach for now... */
9798 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9799 *pu8Dst = u8Value;
9800 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9801}
9802#endif
9803
9804
9805/**
9806 * Stores a data word.
9807 *
9808 * @returns Strict VBox status code.
9809 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9810 * @param iSegReg The index of the segment register to use for
9811 * this access. The base and limits are checked.
9812 * @param GCPtrMem The address of the guest memory.
9813 * @param u16Value The value to store.
9814 */
9815IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9816{
9817 /* The lazy approach for now... */
9818 uint16_t *pu16Dst;
9819 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9820 if (rc == VINF_SUCCESS)
9821 {
9822 *pu16Dst = u16Value;
9823 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9824 }
9825 return rc;
9826}
9827
9828
9829#ifdef IEM_WITH_SETJMP
9830/**
9831 * Stores a data word, longjmp on error.
9832 *
9833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9834 * @param iSegReg The index of the segment register to use for
9835 * this access. The base and limits are checked.
9836 * @param GCPtrMem The address of the guest memory.
9837 * @param u16Value The value to store.
9838 */
9839IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9840{
9841 /* The lazy approach for now... */
9842 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9843 *pu16Dst = u16Value;
9844 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9845}
9846#endif
9847
9848
9849/**
9850 * Stores a data dword.
9851 *
9852 * @returns Strict VBox status code.
9853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9854 * @param iSegReg The index of the segment register to use for
9855 * this access. The base and limits are checked.
9856 * @param GCPtrMem The address of the guest memory.
9857 * @param u32Value The value to store.
9858 */
9859IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9860{
9861 /* The lazy approach for now... */
9862 uint32_t *pu32Dst;
9863 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9864 if (rc == VINF_SUCCESS)
9865 {
9866 *pu32Dst = u32Value;
9867 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9868 }
9869 return rc;
9870}
9871
9872
9873#ifdef IEM_WITH_SETJMP
9874/**
9875 * Stores a data dword, longjmp on error.
9876 *
9878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9879 * @param iSegReg The index of the segment register to use for
9880 * this access. The base and limits are checked.
9881 * @param GCPtrMem The address of the guest memory.
9882 * @param u32Value The value to store.
9883 */
9884IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9885{
9886 /* The lazy approach for now... */
9887 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9888 *pu32Dst = u32Value;
9889 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9890}
9891#endif
9892
9893
9894/**
9895 * Stores a data qword.
9896 *
9897 * @returns Strict VBox status code.
9898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9899 * @param iSegReg The index of the segment register to use for
9900 * this access. The base and limits are checked.
9901 * @param GCPtrMem The address of the guest memory.
9902 * @param u64Value The value to store.
9903 */
9904IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9905{
9906 /* The lazy approach for now... */
9907 uint64_t *pu64Dst;
9908 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9909 if (rc == VINF_SUCCESS)
9910 {
9911 *pu64Dst = u64Value;
9912 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9913 }
9914 return rc;
9915}
9916
9917
9918#ifdef IEM_WITH_SETJMP
9919/**
9920 * Stores a data qword, longjmp on error.
9921 *
9922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9923 * @param iSegReg The index of the segment register to use for
9924 * this access. The base and limits are checked.
9925 * @param GCPtrMem The address of the guest memory.
9926 * @param u64Value The value to store.
9927 */
9928IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9929{
9930 /* The lazy approach for now... */
9931 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9932 *pu64Dst = u64Value;
9933 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9934}
9935#endif
9936
9937
9938/**
9939 * Stores a data dqword.
9940 *
9941 * @returns Strict VBox status code.
9942 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9943 * @param iSegReg The index of the segment register to use for
9944 * this access. The base and limits are checked.
9945 * @param GCPtrMem The address of the guest memory.
9946 * @param u128Value The value to store.
9947 */
9948IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9949{
9950 /* The lazy approach for now... */
9951 PRTUINT128U pu128Dst;
9952 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9953 if (rc == VINF_SUCCESS)
9954 {
9955 pu128Dst->au64[0] = u128Value.au64[0];
9956 pu128Dst->au64[1] = u128Value.au64[1];
9957 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9958 }
9959 return rc;
9960}
9961
9962
9963#ifdef IEM_WITH_SETJMP
9964/**
9965 * Stores a data dqword, longjmp on error.
9966 *
9967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9968 * @param iSegReg The index of the segment register to use for
9969 * this access. The base and limits are checked.
9970 * @param GCPtrMem The address of the guest memory.
9971 * @param u128Value The value to store.
9972 */
9973IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9974{
9975 /* The lazy approach for now... */
9976 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9977 pu128Dst->au64[0] = u128Value.au64[0];
9978 pu128Dst->au64[1] = u128Value.au64[1];
9979 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9980}
9981#endif
9982
9983
9984/**
9985 * Stores a data dqword, SSE aligned.
9986 *
9987 * @returns Strict VBox status code.
9988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9989 * @param iSegReg The index of the segment register to use for
9990 * this access. The base and limits are checked.
9991 * @param GCPtrMem The address of the guest memory.
9992 * @param u128Value The value to store.
9993 */
9994IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9995{
9996 /* The lazy approach for now... */
9997 if ( (GCPtrMem & 15)
9998 && !(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9999 return iemRaiseGeneralProtectionFault0(pVCpu);
10000
10001 PRTUINT128U pu128Dst;
10002 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10003 if (rc == VINF_SUCCESS)
10004 {
10005 pu128Dst->au64[0] = u128Value.au64[0];
10006 pu128Dst->au64[1] = u128Value.au64[1];
10007 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10008 }
10009 return rc;
10010}
10011
10012
10013#ifdef IEM_WITH_SETJMP
10014/**
10015 * Stores a data dqword, SSE aligned, longjmp on error.
10016 *
10018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10019 * @param iSegReg The index of the segment register to use for
10020 * this access. The base and limits are checked.
10021 * @param GCPtrMem The address of the guest memory.
10022 * @param u128Value The value to store.
10023 */
10024DECL_NO_INLINE(IEM_STATIC, void)
10025iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10026{
10027 /* The lazy approach for now... */
10028 if ( (GCPtrMem & 15) == 0
10029 || (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10030 {
10031 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10032 pu128Dst->au64[0] = u128Value.au64[0];
10033 pu128Dst->au64[1] = u128Value.au64[1];
10034 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10035 return;
10036 }
10037
10038 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10039 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10040}
10041#endif
10042
10043
10044/**
10045 * Stores a data oword (octo word), generally AVX related.
10046 *
10047 * @returns Strict VBox status code.
10048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10049 * @param iSegReg The index of the segment register to use for
10050 * this access. The base and limits are checked.
10051 * @param GCPtrMem The address of the guest memory.
10052 * @param pu256Value Pointer to the value to store.
10053 */
10054IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10055{
10056 /* The lazy approach for now... */
10057 PRTUINT256U pu256Dst;
10058 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10059 if (rc == VINF_SUCCESS)
10060 {
10061 pu256Dst->au64[0] = pu256Value->au64[0];
10062 pu256Dst->au64[1] = pu256Value->au64[1];
10063 pu256Dst->au64[2] = pu256Value->au64[2];
10064 pu256Dst->au64[3] = pu256Value->au64[3];
10065 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10066 }
10067 return rc;
10068}
10069
10070
10071#ifdef IEM_WITH_SETJMP
10072/**
10073 * Stores a data oword (octo word), generally AVX related, longjmp on error.
10074 *
10075 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10076 * @param iSegReg The index of the segment register to use for
10077 * this access. The base and limits are checked.
10078 * @param GCPtrMem The address of the guest memory.
10079 * @param pu256Value Pointer to the value to store.
10080 */
10081IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10082{
10083 /* The lazy approach for now... */
10084 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10085 pu256Dst->au64[0] = pu256Value->au64[0];
10086 pu256Dst->au64[1] = pu256Value->au64[1];
10087 pu256Dst->au64[2] = pu256Value->au64[2];
10088 pu256Dst->au64[3] = pu256Value->au64[3];
10089 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10090}
10091#endif
10092
10093
10094/**
10095 * Stores a data oword (octo word), AVX aligned.
10096 *
10097 * @returns Strict VBox status code.
10098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10099 * @param iSegReg The index of the segment register to use for
10100 * this access. The base and limits are checked.
10101 * @param GCPtrMem The address of the guest memory.
10102 * @param pu256Value Pointer to the value to store.
10103 */
10104IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10105{
10106 /* The lazy approach for now... */
10107 if (GCPtrMem & 31)
10108 return iemRaiseGeneralProtectionFault0(pVCpu);
10109
10110 PRTUINT256U pu256Dst;
10111 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10112 if (rc == VINF_SUCCESS)
10113 {
10114 pu256Dst->au64[0] = pu256Value->au64[0];
10115 pu256Dst->au64[1] = pu256Value->au64[1];
10116 pu256Dst->au64[2] = pu256Value->au64[2];
10117 pu256Dst->au64[3] = pu256Value->au64[3];
10118 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10119 }
10120 return rc;
10121}
10122
10123
10124#ifdef IEM_WITH_SETJMP
10125/**
10126 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10127 *
10129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10130 * @param iSegReg The index of the segment register to use for
10131 * this access. The base and limits are checked.
10132 * @param GCPtrMem The address of the guest memory.
10133 * @param pu256Value Pointer to the value to store.
10134 */
10135DECL_NO_INLINE(IEM_STATIC, void)
10136iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10137{
10138 /* The lazy approach for now... */
10139 if ((GCPtrMem & 31) == 0)
10140 {
10141 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10142 pu256Dst->au64[0] = pu256Value->au64[0];
10143 pu256Dst->au64[1] = pu256Value->au64[1];
10144 pu256Dst->au64[2] = pu256Value->au64[2];
10145 pu256Dst->au64[3] = pu256Value->au64[3];
10146 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10147 return;
10148 }
10149
10150 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10151 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10152}
10153#endif
10154
10155
10156/**
10157 * Stores a descriptor register (sgdt, sidt).
10158 *
10159 * @returns Strict VBox status code.
10160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10161 * @param cbLimit The limit.
10162 * @param GCPtrBase The base address.
10163 * @param iSegReg The index of the segment register to use for
10164 * this access. The base and limits are checked.
10165 * @param GCPtrMem The address of the guest memory.
10166 */
10167IEM_STATIC VBOXSTRICTRC
10168iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10169{
10170 /*
10171 * The SIDT and SGDT instructions actually store the data using two
10172 * independent writes. The instructions do not respond to operand size prefixes.
10173 */
10174 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10175 if (rcStrict == VINF_SUCCESS)
10176 {
10177 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10178 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10179 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10180 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10181 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10182 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10183 else
10184 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10185 }
10186 return rcStrict;
10187}
10188
10189
10190/**
10191 * Pushes a word onto the stack.
10192 *
10193 * @returns Strict VBox status code.
10194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10195 * @param u16Value The value to push.
10196 */
10197IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10198{
10199 /* Decrement the stack pointer. */
10200 uint64_t uNewRsp;
10201 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10202 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 2, &uNewRsp);
10203
10204 /* Write the word the lazy way. */
10205 uint16_t *pu16Dst;
10206 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10207 if (rc == VINF_SUCCESS)
10208 {
10209 *pu16Dst = u16Value;
10210 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10211 }
10212
10213 /* Commit the new RSP value unless an access handler made trouble. */
10214 if (rc == VINF_SUCCESS)
10215 pCtx->rsp = uNewRsp;
10216
10217 return rc;
10218}
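/*
 * Note on the pattern used by the push/pop helpers here: the new RSP value is only
 * committed after the memory commit succeeded, so a \#PF or \#SS raised while
 * mapping or writing the stack slot leaves the architectural stack pointer
 * unchanged.
 */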
10219
10220
10221/**
10222 * Pushes a dword onto the stack.
10223 *
10224 * @returns Strict VBox status code.
10225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10226 * @param u32Value The value to push.
10227 */
10228IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10229{
10230 /* Decrement the stack pointer. */
10231 uint64_t uNewRsp;
10232 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10233 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10234
10235 /* Write the dword the lazy way. */
10236 uint32_t *pu32Dst;
10237 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10238 if (rc == VINF_SUCCESS)
10239 {
10240 *pu32Dst = u32Value;
10241 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10242 }
10243
10244 /* Commit the new RSP value unless an access handler made trouble. */
10245 if (rc == VINF_SUCCESS)
10246 pCtx->rsp = uNewRsp;
10247
10248 return rc;
10249}
10250
10251
10252/**
10253 * Pushes a dword segment register value onto the stack.
10254 *
10255 * @returns Strict VBox status code.
10256 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10257 * @param u32Value The value to push.
10258 */
10259IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10260{
10261 /* Decrement the stack pointer. */
10262 uint64_t uNewRsp;
10263 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10264 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp);
10265
10266 /* The Intel docs talk about zero extending the selector register
10267 value. My actual Intel CPU here might be zero extending the value,
10268 but it still only writes the lower word... */
10269 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10270 * happens when crossing a page boundary: is the high word checked
10271 * for write accessibility or not? Probably it is. What about segment limits?
10272 * It appears this behavior is also shared with trap error codes.
10273 *
10274 * Docs indicate the behavior maybe changed with the Pentium or Pentium Pro. Check
10275 * on ancient hardware to see when it actually changed. */
10276 uint16_t *pu16Dst;
10277 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10278 if (rc == VINF_SUCCESS)
10279 {
10280 *pu16Dst = (uint16_t)u32Value;
10281 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10282 }
10283
10284 /* Commit the new RSP value unless an access handler made trouble. */
10285 if (rc == VINF_SUCCESS)
10286 pCtx->rsp = uNewRsp;
10287
10288 return rc;
10289}
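/*
 * Informal illustration of the store above (an observation about this implementation,
 * not a claim about every CPU generation): a 32-bit push of a segment register
 * reserves a dword stack slot but only the low word is actually written, so bytes
 * 2..3 of the slot keep their previous contents.  Hypothetical standalone sketch:
 *
 * @code
 *  #include <stdint.h>
 *  #include <string.h>
 *
 *  static void myPushSRegDword(uint8_t *pbStackSlot, uint32_t uSel)
 *  {
 *      uint16_t const uSel16 = (uint16_t)uSel;
 *      memcpy(pbStackSlot, &uSel16, sizeof(uSel16));   // bytes 2..3 left untouched
 *  }
 * @endcode
 */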
10290
10291
10292/**
10293 * Pushes a qword onto the stack.
10294 *
10295 * @returns Strict VBox status code.
10296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10297 * @param u64Value The value to push.
10298 */
10299IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10300{
10301 /* Decrement the stack pointer. */
10302 uint64_t uNewRsp;
10303 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10304 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 8, &uNewRsp);
10305
10306 /* Write the qword the lazy way. */
10307 uint64_t *pu64Dst;
10308 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10309 if (rc == VINF_SUCCESS)
10310 {
10311 *pu64Dst = u64Value;
10312 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10313 }
10314
10315 /* Commit the new RSP value unless an access handler made trouble. */
10316 if (rc == VINF_SUCCESS)
10317 pCtx->rsp = uNewRsp;
10318
10319 return rc;
10320}
10321
10322
10323/**
10324 * Pops a word from the stack.
10325 *
10326 * @returns Strict VBox status code.
10327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10328 * @param pu16Value Where to store the popped value.
10329 */
10330IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10331{
10332 /* Increment the stack pointer. */
10333 uint64_t uNewRsp;
10334 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10335 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 2, &uNewRsp);
10336
10337 /* Read the word the lazy way. */
10338 uint16_t const *pu16Src;
10339 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10340 if (rc == VINF_SUCCESS)
10341 {
10342 *pu16Value = *pu16Src;
10343 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10344
10345 /* Commit the new RSP value. */
10346 if (rc == VINF_SUCCESS)
10347 pCtx->rsp = uNewRsp;
10348 }
10349
10350 return rc;
10351}
10352
10353
10354/**
10355 * Pops a dword from the stack.
10356 *
10357 * @returns Strict VBox status code.
10358 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10359 * @param pu32Value Where to store the popped value.
10360 */
10361IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10362{
10363 /* Increment the stack pointer. */
10364 uint64_t uNewRsp;
10365 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10366 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 4, &uNewRsp);
10367
10368 /* Read the dword the lazy way. */
10369 uint32_t const *pu32Src;
10370 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10371 if (rc == VINF_SUCCESS)
10372 {
10373 *pu32Value = *pu32Src;
10374 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10375
10376 /* Commit the new RSP value. */
10377 if (rc == VINF_SUCCESS)
10378 pCtx->rsp = uNewRsp;
10379 }
10380
10381 return rc;
10382}
10383
10384
10385/**
10386 * Pops a qword from the stack.
10387 *
10388 * @returns Strict VBox status code.
10389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10390 * @param pu64Value Where to store the popped value.
10391 */
10392IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10393{
10394 /* Increment the stack pointer. */
10395 uint64_t uNewRsp;
10396 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10397 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, 8, &uNewRsp);
10398
10399 /* Read the qword the lazy way. */
10400 uint64_t const *pu64Src;
10401 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10402 if (rc == VINF_SUCCESS)
10403 {
10404 *pu64Value = *pu64Src;
10405 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10406
10407 /* Commit the new RSP value. */
10408 if (rc == VINF_SUCCESS)
10409 pCtx->rsp = uNewRsp;
10410 }
10411
10412 return rc;
10413}
10414
10415
10416/**
10417 * Pushes a word onto the stack, using a temporary stack pointer.
10418 *
10419 * @returns Strict VBox status code.
10420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10421 * @param u16Value The value to push.
10422 * @param pTmpRsp Pointer to the temporary stack pointer.
10423 */
10424IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10425{
10426 /* Decrement the stack pointer. */
10427 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10428 RTUINT64U NewRsp = *pTmpRsp;
10429 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 2);
10430
10431 /* Write the word the lazy way. */
10432 uint16_t *pu16Dst;
10433 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10434 if (rc == VINF_SUCCESS)
10435 {
10436 *pu16Dst = u16Value;
10437 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10438 }
10439
10440 /* Commit the new RSP value unless an access handler made trouble. */
10441 if (rc == VINF_SUCCESS)
10442 *pTmpRsp = NewRsp;
10443
10444 return rc;
10445}
10446
10447
10448/**
10449 * Pushes a dword onto the stack, using a temporary stack pointer.
10450 *
10451 * @returns Strict VBox status code.
10452 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10453 * @param u32Value The value to push.
10454 * @param pTmpRsp Pointer to the temporary stack pointer.
10455 */
10456IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10457{
10458 /* Decrement the stack pointer. */
10459 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10460 RTUINT64U NewRsp = *pTmpRsp;
10461 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 4);
10462
10463 /* Write the dword the lazy way. */
10464 uint32_t *pu32Dst;
10465 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10466 if (rc == VINF_SUCCESS)
10467 {
10468 *pu32Dst = u32Value;
10469 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10470 }
10471
10472 /* Commit the new RSP value unless an access handler made trouble. */
10473 if (rc == VINF_SUCCESS)
10474 *pTmpRsp = NewRsp;
10475
10476 return rc;
10477}
10478
10479
10480/**
10481 * Pushes a qword onto the stack, using a temporary stack pointer.
10482 *
10483 * @returns Strict VBox status code.
10484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10485 * @param u64Value The value to push.
10486 * @param pTmpRsp Pointer to the temporary stack pointer.
10487 */
10488IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10489{
10490 /* Decrement the stack pointer. */
10491 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10492 RTUINT64U NewRsp = *pTmpRsp;
10493 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, pCtx, &NewRsp, 8);
10494
10495 /* Write the qword the lazy way. */
10496 uint64_t *pu64Dst;
10497 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10498 if (rc == VINF_SUCCESS)
10499 {
10500 *pu64Dst = u64Value;
10501 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10502 }
10503
10504 /* Commit the new RSP value unless an access handler made trouble. */
10505 if (rc == VINF_SUCCESS)
10506 *pTmpRsp = NewRsp;
10507
10508 return rc;
10509}
10510
10511
10512/**
10513 * Pops a word from the stack, using a temporary stack pointer.
10514 *
10515 * @returns Strict VBox status code.
10516 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10517 * @param pu16Value Where to store the popped value.
10518 * @param pTmpRsp Pointer to the temporary stack pointer.
10519 */
10520IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10521{
10522 /* Increment the stack pointer. */
10523 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10524 RTUINT64U NewRsp = *pTmpRsp;
10525 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 2);
10526
10527 /* Read the word the lazy way. */
10528 uint16_t const *pu16Src;
10529 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10530 if (rc == VINF_SUCCESS)
10531 {
10532 *pu16Value = *pu16Src;
10533 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10534
10535 /* Commit the new RSP value. */
10536 if (rc == VINF_SUCCESS)
10537 *pTmpRsp = NewRsp;
10538 }
10539
10540 return rc;
10541}
10542
10543
10544/**
10545 * Pops a dword from the stack, using a temporary stack pointer.
10546 *
10547 * @returns Strict VBox status code.
10548 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10549 * @param pu32Value Where to store the popped value.
10550 * @param pTmpRsp Pointer to the temporary stack pointer.
10551 */
10552IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10553{
10554 /* Increment the stack pointer. */
10555 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10556 RTUINT64U NewRsp = *pTmpRsp;
10557 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 4);
10558
10559 /* Read the dword the lazy way. */
10560 uint32_t const *pu32Src;
10561 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10562 if (rc == VINF_SUCCESS)
10563 {
10564 *pu32Value = *pu32Src;
10565 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10566
10567 /* Commit the new RSP value. */
10568 if (rc == VINF_SUCCESS)
10569 *pTmpRsp = NewRsp;
10570 }
10571
10572 return rc;
10573}
10574
10575
10576/**
10577 * Pops a qword from the stack, using a temporary stack pointer.
10578 *
10579 * @returns Strict VBox status code.
10580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10581 * @param pu64Value Where to store the popped value.
10582 * @param pTmpRsp Pointer to the temporary stack pointer.
10583 */
10584IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10585{
10586 /* Increment the stack pointer. */
10587 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10588 RTUINT64U NewRsp = *pTmpRsp;
10589 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10590
10591 /* Read the qword the lazy way. */
10592 uint64_t const *pu64Src;
10593 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10594 if (rcStrict == VINF_SUCCESS)
10595 {
10596 *pu64Value = *pu64Src;
10597 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10598
10599 /* Commit the new RSP value. */
10600 if (rcStrict == VINF_SUCCESS)
10601 *pTmpRsp = NewRsp;
10602 }
10603
10604 return rcStrict;
10605}
10606
10607
10608/**
10609 * Begin a special stack push (used by interrupts, exceptions and such).
10610 *
10611 * This will raise \#SS or \#PF if appropriate.
10612 *
10613 * @returns Strict VBox status code.
10614 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10615 * @param cbMem The number of bytes to push onto the stack.
10616 * @param ppvMem Where to return the pointer to the stack memory.
10617 * As with the other memory functions this could be
10618 * direct access or bounce buffered access, so
10619 * don't commit register state until the commit call
10620 * succeeds.
10621 * @param puNewRsp Where to return the new RSP value. This must be
10622 * passed unchanged to
10623 * iemMemStackPushCommitSpecial().
10624 */
10625IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10626{
10627 Assert(cbMem < UINT8_MAX);
10628 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10629 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10630 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10631}
10632
10633
10634/**
10635 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10636 *
10637 * This will update the rSP.
10638 *
10639 * @returns Strict VBox status code.
10640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10641 * @param pvMem The pointer returned by
10642 * iemMemStackPushBeginSpecial().
10643 * @param uNewRsp The new RSP value returned by
10644 * iemMemStackPushBeginSpecial().
10645 */
10646IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10647{
10648 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10649 if (rcStrict == VINF_SUCCESS)
10650 IEM_GET_CTX(pVCpu)->rsp = uNewRsp;
10651 return rcStrict;
10652}
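/*
 * Informal usage sketch for the special push API above (hypothetical caller building
 * a 6 byte real-mode interrupt frame; error handling trimmed, and uFlags/uCs/uIp are
 * assumed to be defined by the caller):
 *
 * @code
 *  uint64_t  uNewRsp;
 *  uint16_t *pu16Frame;
 *  VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *  if (rcStrict == VINF_SUCCESS)
 *  {
 *      pu16Frame[2] = uFlags;
 *      pu16Frame[1] = uCs;
 *      pu16Frame[0] = uIp;
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
 *  }
 * @endcode
 */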
10653
10654
10655/**
10656 * Begin a special stack pop (used by iret, retf and such).
10657 *
10658 * This will raise \#SS or \#PF if appropriate.
10659 *
10660 * @returns Strict VBox status code.
10661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10662 * @param cbMem The number of bytes to pop from the stack.
10663 * @param ppvMem Where to return the pointer to the stack memory.
10664 * @param puNewRsp Where to return the new RSP value. This must be
10665 * assigned to CPUMCTX::rsp manually some time
10666 * after iemMemStackPopDoneSpecial() has been
10667 * called.
10668 */
10669IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10670{
10671 Assert(cbMem < UINT8_MAX);
10672 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10673 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, pCtx, (uint8_t)cbMem, puNewRsp);
10674 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10675}
10676
10677
10678/**
10679 * Continue a special stack pop (used by iret and retf).
10680 *
10681 * This will raise \#SS or \#PF if appropriate.
10682 *
10683 * @returns Strict VBox status code.
10684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10685 * @param cbMem The number of bytes to pop from the stack.
10686 * @param ppvMem Where to return the pointer to the stack memory.
10687 * @param puNewRsp Where to return the new RSP value. This must be
10688 * assigned to CPUMCTX::rsp manually some time
10689 * after iemMemStackPopDoneSpecial() has been
10690 * called.
10691 */
10692IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10693{
10694 Assert(cbMem < UINT8_MAX);
10695 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10696 RTUINT64U NewRsp;
10697 NewRsp.u = *puNewRsp;
10698 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, pCtx, &NewRsp, 8);
10699 *puNewRsp = NewRsp.u;
10700 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10701}
10702
10703
10704/**
10705 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10706 * iemMemStackPopContinueSpecial).
10707 *
10708 * The caller will manually commit the rSP.
10709 *
10710 * @returns Strict VBox status code.
10711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10712 * @param pvMem The pointer returned by
10713 * iemMemStackPopBeginSpecial() or
10714 * iemMemStackPopContinueSpecial().
10715 */
10716IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10717{
10718 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10719}
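/*
 * Matching usage sketch for the special pop API (hypothetical iret-style caller;
 * error handling trimmed):
 *
 * @code
 *  uint64_t        uNewRsp;
 *  uint16_t const *pu16Frame;
 *  VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, (void const **)&pu16Frame, &uNewRsp);
 *  if (rcStrict == VINF_SUCCESS)
 *  {
 *      uint16_t const uIp    = pu16Frame[0];
 *      uint16_t const uCs    = pu16Frame[1];
 *      uint16_t const uFlags = pu16Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu16Frame);
 *      if (rcStrict == VINF_SUCCESS)
 *          IEM_GET_CTX(pVCpu)->rsp = uNewRsp;  // the caller commits RSP manually
 *      // ... uIp, uCs and uFlags would then be loaded into the guest state ...
 *  }
 * @endcode
 */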
10720
10721
10722/**
10723 * Fetches a system table byte.
10724 *
10725 * @returns Strict VBox status code.
10726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10727 * @param pbDst Where to return the byte.
10728 * @param iSegReg The index of the segment register to use for
10729 * this access. The base and limits are checked.
10730 * @param GCPtrMem The address of the guest memory.
10731 */
10732IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10733{
10734 /* The lazy approach for now... */
10735 uint8_t const *pbSrc;
10736 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10737 if (rc == VINF_SUCCESS)
10738 {
10739 *pbDst = *pbSrc;
10740 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10741 }
10742 return rc;
10743}
10744
10745
10746/**
10747 * Fetches a system table word.
10748 *
10749 * @returns Strict VBox status code.
10750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10751 * @param pu16Dst Where to return the word.
10752 * @param iSegReg The index of the segment register to use for
10753 * this access. The base and limits are checked.
10754 * @param GCPtrMem The address of the guest memory.
10755 */
10756IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10757{
10758 /* The lazy approach for now... */
10759 uint16_t const *pu16Src;
10760 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10761 if (rc == VINF_SUCCESS)
10762 {
10763 *pu16Dst = *pu16Src;
10764 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10765 }
10766 return rc;
10767}
10768
10769
10770/**
10771 * Fetches a system table dword.
10772 *
10773 * @returns Strict VBox status code.
10774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10775 * @param pu32Dst Where to return the dword.
10776 * @param iSegReg The index of the segment register to use for
10777 * this access. The base and limits are checked.
10778 * @param GCPtrMem The address of the guest memory.
10779 */
10780IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10781{
10782 /* The lazy approach for now... */
10783 uint32_t const *pu32Src;
10784 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10785 if (rc == VINF_SUCCESS)
10786 {
10787 *pu32Dst = *pu32Src;
10788 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10789 }
10790 return rc;
10791}
10792
10793
10794/**
10795 * Fetches a system table qword.
10796 *
10797 * @returns Strict VBox status code.
10798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10799 * @param pu64Dst Where to return the qword.
10800 * @param iSegReg The index of the segment register to use for
10801 * this access. The base and limits are checked.
10802 * @param GCPtrMem The address of the guest memory.
10803 */
10804IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10805{
10806 /* The lazy approach for now... */
10807 uint64_t const *pu64Src;
10808 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10809 if (rc == VINF_SUCCESS)
10810 {
10811 *pu64Dst = *pu64Src;
10812 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10813 }
10814 return rc;
10815}
10816
10817
10818/**
10819 * Fetches a descriptor table entry with caller specified error code.
10820 *
10821 * @returns Strict VBox status code.
10822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10823 * @param pDesc Where to return the descriptor table entry.
10824 * @param uSel The selector which table entry to fetch.
10825 * @param uXcpt The exception to raise on table lookup error.
10826 * @param uErrorCode The error code associated with the exception.
10827 */
10828IEM_STATIC VBOXSTRICTRC
10829iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10830{
10831 AssertPtr(pDesc);
10832 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10833 IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10834
10835 /** @todo did the 286 require all 8 bytes to be accessible? */
10836 /*
10837 * Get the selector table base and check bounds.
10838 */
10839 RTGCPTR GCPtrBase;
10840 if (uSel & X86_SEL_LDT)
10841 {
10842 if ( !pCtx->ldtr.Attr.n.u1Present
10843 || (uSel | X86_SEL_RPL_LDT) > pCtx->ldtr.u32Limit )
10844 {
10845 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10846 uSel, pCtx->ldtr.u32Limit, pCtx->ldtr.Sel));
10847 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10848 uErrorCode, 0);
10849 }
10850
10851 Assert(pCtx->ldtr.Attr.n.u1Present);
10852 GCPtrBase = pCtx->ldtr.u64Base;
10853 }
10854 else
10855 {
10856 if ((uSel | X86_SEL_RPL_LDT) > pCtx->gdtr.cbGdt)
10857 {
10858 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pCtx->gdtr.cbGdt));
10859 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10860 uErrorCode, 0);
10861 }
10862 GCPtrBase = pCtx->gdtr.pGdt;
10863 }
10864
10865 /*
10866 * Read the legacy descriptor and maybe the long mode extensions if
10867 * required.
10868 */
10869 VBOXSTRICTRC rcStrict;
10870 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10871 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10872 else
10873 {
10874 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10875 if (rcStrict == VINF_SUCCESS)
10876 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10877 if (rcStrict == VINF_SUCCESS)
10878 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10879 if (rcStrict == VINF_SUCCESS)
10880 pDesc->Legacy.au16[3] = 0;
10881 else
10882 return rcStrict;
10883 }
10884
10885 if (rcStrict == VINF_SUCCESS)
10886 {
10887 if ( !IEM_IS_LONG_MODE(pVCpu)
10888 || pDesc->Legacy.Gen.u1DescType)
10889 pDesc->Long.au64[1] = 0;
10890 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pCtx->ldtr.u32Limit : pCtx->gdtr.cbGdt))
10891 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10892 else
10893 {
10894 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10895 /** @todo is this the right exception? */
10896 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10897 }
10898 }
10899 return rcStrict;
10900}
10901
10902
10903/**
10904 * Fetches a descriptor table entry.
10905 *
10906 * @returns Strict VBox status code.
10907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10908 * @param pDesc Where to return the descriptor table entry.
10909 * @param uSel The selector which table entry to fetch.
10910 * @param uXcpt The exception to raise on table lookup error.
10911 */
10912IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10913{
10914 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10915}
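
/*
 * Illustrative sketch only (not part of the original source): one way a caller
 * could combine iemMemFetchSelDesc with iemMemMarkSelDescAccessed when loading
 * a data segment descriptor.  The helper name iemExampleLoadDataSegDesc is
 * hypothetical and the block is excluded from the build.
 */
#if 0 /* example only */
IEM_STATIC VBOXSTRICTRC iemExampleLoadDataSegDesc(PVMCPU pVCpu, uint16_t uSel, PIEMSELDESC pDesc)
{
    /* Fetch the descriptor; on bounds or not-present failures this raises #GP
       with the selector (sans RPL) as the error code. */
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* Only non-system descriptors have an accessed bit to set. */
    if (   pDesc->Legacy.Gen.u1DescType
        && !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
        rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
    return rcStrict;
}
#endif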
10916
10917
10918/**
10919 * Fakes a long mode stack selector for SS = 0.
10920 *
10921 * @param pDescSs Where to return the fake stack descriptor.
10922 * @param uDpl The DPL we want.
10923 */
10924IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10925{
10926 pDescSs->Long.au64[0] = 0;
10927 pDescSs->Long.au64[1] = 0;
10928 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10929 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10930 pDescSs->Long.Gen.u2Dpl = uDpl;
10931 pDescSs->Long.Gen.u1Present = 1;
10932 pDescSs->Long.Gen.u1Long = 1;
10933}
10934
10935
10936/**
10937 * Marks the selector descriptor as accessed (only non-system descriptors).
10938 *
10939 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10940 * therefore skips the limit checks.
10941 *
10942 * @returns Strict VBox status code.
10943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10944 * @param uSel The selector.
10945 */
10946IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10947{
10948 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
10949
10950 /*
10951 * Get the selector table base and calculate the entry address.
10952 */
10953 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10954 ? pCtx->ldtr.u64Base
10955 : pCtx->gdtr.pGdt;
10956 GCPtr += uSel & X86_SEL_MASK;
10957
10958 /*
10959 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10960 * ugly stuff to avoid this. This makes sure the access is atomic and
10961 * more or less removes any question about 8-bit vs 32-bit accesses.
10962 */
10963 VBOXSTRICTRC rcStrict;
10964 uint32_t volatile *pu32;
10965 if ((GCPtr & 3) == 0)
10966 {
10967 /* The normal case, map the 32-bit bits around the accessed bit (40). */
10968 GCPtr += 2 + 2;
10969 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10970 if (rcStrict != VINF_SUCCESS)
10971 return rcStrict;
10972 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10973 }
10974 else
10975 {
10976 /* The misaligned GDT/LDT case, map the whole thing. */
10977 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10978 if (rcStrict != VINF_SUCCESS)
10979 return rcStrict;
10980 switch ((uintptr_t)pu32 & 3)
10981 {
10982 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10983 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10984 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10985 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10986 }
10987 }
10988
10989 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10990}
10991
10992/** @} */
10993
10994
10995/*
10996 * Include the C/C++ implementation of instruction.
10997 */
10998#include "IEMAllCImpl.cpp.h"
10999
11000
11001
11002/** @name "Microcode" macros.
11003 *
11004 * The idea is that we should be able to use the same code both to interpret
11005 * instructions and to feed a recompiler. Thus this obfuscation.
11006 *
11007 * @{
11008 */
11009#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11010#define IEM_MC_END() }
11011#define IEM_MC_PAUSE() do {} while (0)
11012#define IEM_MC_CONTINUE() do {} while (0)
11013
11014/** Internal macro. */
11015#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11016 do \
11017 { \
11018 VBOXSTRICTRC rcStrict2 = a_Expr; \
11019 if (rcStrict2 != VINF_SUCCESS) \
11020 return rcStrict2; \
11021 } while (0)
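
/*
 * Illustrative sketch only (not from the original source): the register form of
 * a 16-bit binary ALU instruction is typically written with these macros along
 * the following lines (the real bodies live in the IEMAllInstructions*.cpp.h
 * templates; iGRegDst, iGRegSrc and pfnAImpl stand in for decoded values):
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_ARG(uint16_t *, pu16Dst,  0);
 *      IEM_MC_ARG(uint16_t,   u16Src,   1);
 *      IEM_MC_ARG(uint32_t *, pEFlags,  2);
 *      IEM_MC_REF_GREG_U16(pu16Dst, iGRegDst);
 *      IEM_MC_FETCH_GREG_U16(u16Src, iGRegSrc);
 *      IEM_MC_REF_EFLAGS(pEFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(pfnAImpl, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * When interpreting, the macros below expand to direct state access and helper
 * calls; a recompiler could redefine them to emit code instead.
 */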
11022
11023
11024#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11025#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11026#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11027#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11028#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11029#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11030#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11031#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11032#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11033 do { \
11034 if (IEM_GET_CTX(pVCpu)->cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11035 return iemRaiseDeviceNotAvailable(pVCpu); \
11036 } while (0)
11037#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11038 do { \
11039 if ((IEM_GET_CTX(pVCpu)->cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11040 return iemRaiseDeviceNotAvailable(pVCpu); \
11041 } while (0)
11042#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11043 do { \
11044 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11045 return iemRaiseMathFault(pVCpu); \
11046 } while (0)
11047#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11048 do { \
11049 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11050 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11051 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11052 return iemRaiseUndefinedOpcode(pVCpu); \
11053 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11054 return iemRaiseDeviceNotAvailable(pVCpu); \
11055 } while (0)
11056#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11057 do { \
11058 if ( (IEM_GET_CTX(pVCpu)->aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11059 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSXSAVE) \
11060 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11061 return iemRaiseUndefinedOpcode(pVCpu); \
11062 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11063 return iemRaiseDeviceNotAvailable(pVCpu); \
11064 } while (0)
11065#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11066 do { \
11067 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11068 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11069 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11070 return iemRaiseUndefinedOpcode(pVCpu); \
11071 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11072 return iemRaiseDeviceNotAvailable(pVCpu); \
11073 } while (0)
11074#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11075 do { \
11076 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11077 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11078 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11079 return iemRaiseUndefinedOpcode(pVCpu); \
11080 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11081 return iemRaiseDeviceNotAvailable(pVCpu); \
11082 } while (0)
11083#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11084 do { \
11085 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11086 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11087 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11088 return iemRaiseUndefinedOpcode(pVCpu); \
11089 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11090 return iemRaiseDeviceNotAvailable(pVCpu); \
11091 } while (0)
11092#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11093 do { \
11094 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11095 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_OSFXSR) \
11096 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11097 return iemRaiseUndefinedOpcode(pVCpu); \
11098 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11099 return iemRaiseDeviceNotAvailable(pVCpu); \
11100 } while (0)
11101#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11102 do { \
11103 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11104 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11105 return iemRaiseUndefinedOpcode(pVCpu); \
11106 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11107 return iemRaiseDeviceNotAvailable(pVCpu); \
11108 } while (0)
11109#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11110 do { \
11111 if ( (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_EM) \
11112 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11113 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11114 return iemRaiseUndefinedOpcode(pVCpu); \
11115 if (IEM_GET_CTX(pVCpu)->cr0 & X86_CR0_TS) \
11116 return iemRaiseDeviceNotAvailable(pVCpu); \
11117 } while (0)
11118#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11119 do { \
11120 if (pVCpu->iem.s.uCpl != 0) \
11121 return iemRaiseGeneralProtectionFault0(pVCpu); \
11122 } while (0)
11123#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11124 do { \
11125 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11126 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11127 } while (0)
11128#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11129 do { \
11130 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11131 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11132 || !(IEM_GET_CTX(pVCpu)->cr4 & X86_CR4_FSGSBASE)) \
11133 return iemRaiseUndefinedOpcode(pVCpu); \
11134 } while (0)
11135#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11136 do { \
11137 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11138 return iemRaiseGeneralProtectionFault0(pVCpu); \
11139 } while (0)
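
/*
 * Worked example (added illustration, not from the original source): with
 * a_cbAlign = 16, IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED evaluates
 * (a_EffAddr) & 15; an effective address of 0x1008 yields 8 and raises #GP(0),
 * while 0x1010 yields 0 and execution continues.  This is how instructions
 * requiring naturally aligned memory operands reject misaligned addresses.
 */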
11140
11141
11142#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11143#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11144#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11145#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11146#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11147#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11148#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11149 uint32_t a_Name; \
11150 uint32_t *a_pName = &a_Name
11151#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11152 do { IEM_GET_CTX(pVCpu)->eflags.u = (a_EFlags); Assert(IEM_GET_CTX(pVCpu)->eflags.u & X86_EFL_1); } while (0)
11153
11154#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11155#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11156
11157#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11158#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11159#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11160#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11161#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11162#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11163#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11164#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11165#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11166#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11167#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11168#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11169#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11170#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11171#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11172#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11173#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11174#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11175 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11176 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11177 } while (0)
11178#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11179 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11180 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11181 } while (0)
11182#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11183 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11184 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11185 } while (0)
11186/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11187#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11188 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11189 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11190 } while (0)
11191#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11192 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11193 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11194 } while (0)
11195#define IEM_MC_FETCH_CR0_U16(a_u16Dst) (a_u16Dst) = (uint16_t)IEM_GET_CTX(pVCpu)->cr0
11196#define IEM_MC_FETCH_CR0_U32(a_u32Dst) (a_u32Dst) = (uint32_t)IEM_GET_CTX(pVCpu)->cr0
11197#define IEM_MC_FETCH_CR0_U64(a_u64Dst) (a_u64Dst) = IEM_GET_CTX(pVCpu)->cr0
11198/** @todo IEM_MC_FETCH_LDTR_U16, IEM_MC_FETCH_LDTR_U32, IEM_MC_FETCH_LDTR_U64, IEM_MC_FETCH_TR_U16, IEM_MC_FETCH_TR_U32, and IEM_MC_FETCH_TR_U64 aren't worth it... */
11199#define IEM_MC_FETCH_LDTR_U16(a_u16Dst) do { \
11200 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_LDTR); \
11201 (a_u16Dst) = IEM_GET_CTX(pVCpu)->ldtr.Sel; \
11202 } while (0)
11203#define IEM_MC_FETCH_LDTR_U32(a_u32Dst) do { \
11204 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_LDTR); \
11205 (a_u32Dst) = IEM_GET_CTX(pVCpu)->ldtr.Sel; \
11206 } while (0)
11207#define IEM_MC_FETCH_LDTR_U64(a_u64Dst) do { \
11208 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_LDTR); \
11209 (a_u64Dst) = IEM_GET_CTX(pVCpu)->ldtr.Sel; \
11210 } while (0)
11211#define IEM_MC_FETCH_TR_U16(a_u16Dst) do { \
11212 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_TR); \
11213 (a_u16Dst) = IEM_GET_CTX(pVCpu)->tr.Sel; \
11214 } while (0)
11215#define IEM_MC_FETCH_TR_U32(a_u32Dst) do { \
11216 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_TR); \
11217 (a_u32Dst) = IEM_GET_CTX(pVCpu)->tr.Sel; \
11218 } while (0)
11219#define IEM_MC_FETCH_TR_U64(a_u64Dst) do { \
11220 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_TR); \
11221 (a_u64Dst) = IEM_GET_CTX(pVCpu)->tr.Sel; \
11222 } while (0)
11223/** @note Not for IOPL or IF testing or modification. */
11224#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = IEM_GET_CTX(pVCpu)->eflags.u
11225#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)IEM_GET_CTX(pVCpu)->eflags.u
11226#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW
11227#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW
11228
11229#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11230#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11231#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11232#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11233#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11234#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11235#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11236#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11237#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11238#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11239/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11240#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11241 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11242 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11243 } while (0)
11244#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11245 IEM_CTX_IMPORT_NORET(pVCpu, IEM_GET_CTX(pVCpu), CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11246 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11247 } while (0)
11248#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11249 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11250
11251
11252#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11253#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11254/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11255 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11256#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11257#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11258/** @note Not for IOPL or IF testing or modification. */
11259#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &IEM_GET_CTX(pVCpu)->eflags.u
11260
11261#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11262#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11263#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11264 do { \
11265 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11266 *pu32Reg += (a_u32Value); \
11267 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11268 } while (0)
11269#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11270
11271#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11272#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11273#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11274 do { \
11275 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11276 *pu32Reg -= (a_u32Value); \
11277 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11278 } while (0)
11279#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11280#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11281
11282#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11283#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11284#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11285#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11286#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11287#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11288#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11289
11290#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11291#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11292#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11293#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11294
11295#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11296#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11297#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11298
11299#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11300#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11301#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11302
11303#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11304#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11305#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11306
11307#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11308#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11309#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11310
11311#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11312
11313#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11314
11315#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11316#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11317#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11318 do { \
11319 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11320 *pu32Reg &= (a_u32Value); \
11321 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11322 } while (0)
11323#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11324
11325#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11326#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11327#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11328 do { \
11329 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11330 *pu32Reg |= (a_u32Value); \
11331 pu32Reg[1] = 0; /* implicitly clear the high dword. */ \
11332 } while (0)
11333#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11334
11335
11336/** @note Not for IOPL or IF modification. */
11337#define IEM_MC_SET_EFL_BIT(a_fBit) do { IEM_GET_CTX(pVCpu)->eflags.u |= (a_fBit); } while (0)
11338/** @note Not for IOPL or IF modification. */
11339#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { IEM_GET_CTX(pVCpu)->eflags.u &= ~(a_fBit); } while (0)
11340/** @note Not for IOPL or IF modification. */
11341#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { IEM_GET_CTX(pVCpu)->eflags.u ^= (a_fBit); } while (0)
11342
11343#define IEM_MC_CLEAR_FSW_EX() do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11344
11345/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11346#define IEM_MC_FPU_TO_MMX_MODE() do { \
11347 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11348 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0xff; \
11349 } while (0)
11350
11351/** Switches the FPU state from MMX mode (FTW=0xffff). */
11352#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11353 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FTW = 0; \
11354 } while (0)
11355
11356#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11357 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11358#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11359 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11360#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11361 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11362 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11363 } while (0)
11364#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11365 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11366 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11367 } while (0)
11368#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11369 (a_pu64Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11370#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11371 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11372#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11373 (a_pu32Dst) = ((uint32_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11374
11375#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11376 do { (a_u128Value).au64[0] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11377 (a_u128Value).au64[1] = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11378 } while (0)
11379#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11380 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11381#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11382 do { (a_u32Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11383#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11384 do { (a_u64Value) = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11385#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11386 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11387 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11388 } while (0)
11389#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11390 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11391#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11392 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11393 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11394 } while (0)
11395#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11396 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11397#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11398 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11399 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11400 } while (0)
11401#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11402 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11403#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11404 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11405#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11406 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11407#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11408 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11409#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11410 do { IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11411 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11412 IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11413 = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11414 } while (0)
11415
11416#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11417 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11418 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11419 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11420 } while (0)
11421#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11422 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11423 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11424 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11425 } while (0)
11426#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11427 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11428 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11429 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11430 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11431 } while (0)
11432#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11433 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11434 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11435 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11436 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11437 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11438 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11439 } while (0)
11440
11441#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11442#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11443 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11444 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11445 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11446 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11447 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11448 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11449 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11450 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11451 } while (0)
11452#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11453 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11454 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11455 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11456 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11457 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11458 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11459 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11460 } while (0)
11461#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11462 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11463 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11464 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11465 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11466 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11467 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11468 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11469 } while (0)
11470#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11471 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11472 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11473 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11474 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11475 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11476 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11477 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11478 } while (0)
11479
11480#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11481 (a_pu128Dst) = (&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11482#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11483 (a_pu128Dst) = ((PCRTUINT128U)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11484#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11485 (a_pu64Dst) = ((uint64_t const *)&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11486#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11487 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11488 uintptr_t const iYRegTmp = (a_iYReg); \
11489 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11490 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11491 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11492 } while (0)
11493
11494#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11495 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11496 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11497 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11498 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11499 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11500 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11501 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11502 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11503 } while (0)
11504#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11505 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11506 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11507 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11508 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11509 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11510 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11511 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11512 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11513 } while (0)
11514#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11515 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11516 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11517 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11518 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11519 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11520 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11521 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11522 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11523 } while (0)
11524
11525#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11526 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11527 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11528 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11529 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11530 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11531 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11532 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11533 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11534 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11535 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11536 } while (0)
11537#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11538 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11539 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11540 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11541 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11542 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11543 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11544 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11545 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11546 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11547 } while (0)
11548#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11549 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11550 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11551 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11552 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11553 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11554 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11555 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11556 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11557 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11558 } while (0)
11559#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11560 do { PX86XSAVEAREA pXStateTmp = IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState); \
11561 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11562 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11563 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11564 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11565 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11566 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11567 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11568 } while (0)
11569
11570#ifndef IEM_WITH_SETJMP
11571# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11572 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11573# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11574 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11575# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11576 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11577#else
11578# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11579 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11580# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11581 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11582# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11583 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11584#endif
11585
11586#ifndef IEM_WITH_SETJMP
11587# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11588 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11589# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11590 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11591# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11592 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11593#else
11594# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11595 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11596# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11597 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11598# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11599 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11600#endif
11601
11602#ifndef IEM_WITH_SETJMP
11603# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11604 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11605# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11606 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11607# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11608 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11609#else
11610# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11611 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11612# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11613 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11614# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11615 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11616#endif
11617
11618#ifdef SOME_UNUSED_FUNCTION
11619# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11620 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11621#endif
11622
11623#ifndef IEM_WITH_SETJMP
11624# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11625 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11626# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11627 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11628# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11629 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11630# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11631 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11632#else
11633# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11634 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11635# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11636 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11637# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11638 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11639# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11640 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11641#endif
11642
11643#ifndef IEM_WITH_SETJMP
11644# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11645 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11646# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11647 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11648# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11649 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11650#else
11651# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11652 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11653# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11654 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11655# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11656 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11657#endif
11658
11659#ifndef IEM_WITH_SETJMP
11660# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11661 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11662# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11663 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11664#else
11665# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11666 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11667# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11668 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11669#endif
11670
11671#ifndef IEM_WITH_SETJMP
11672# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11673 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11674# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11675 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11676#else
11677# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11678 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11679# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11680 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11681#endif
11682
11683
11684
11685#ifndef IEM_WITH_SETJMP
11686# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11687 do { \
11688 uint8_t u8Tmp; \
11689 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11690 (a_u16Dst) = u8Tmp; \
11691 } while (0)
11692# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11693 do { \
11694 uint8_t u8Tmp; \
11695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11696 (a_u32Dst) = u8Tmp; \
11697 } while (0)
11698# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11699 do { \
11700 uint8_t u8Tmp; \
11701 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11702 (a_u64Dst) = u8Tmp; \
11703 } while (0)
11704# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11705 do { \
11706 uint16_t u16Tmp; \
11707 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11708 (a_u32Dst) = u16Tmp; \
11709 } while (0)
11710# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11711 do { \
11712 uint16_t u16Tmp; \
11713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11714 (a_u64Dst) = u16Tmp; \
11715 } while (0)
11716# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11717 do { \
11718 uint32_t u32Tmp; \
11719 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11720 (a_u64Dst) = u32Tmp; \
11721 } while (0)
11722#else /* IEM_WITH_SETJMP */
11723# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11724 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11725# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11726 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11727# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11728 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11729# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11730 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11731# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11732 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11733# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11734 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11735#endif /* IEM_WITH_SETJMP */
11736
11737#ifndef IEM_WITH_SETJMP
11738# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11739 do { \
11740 uint8_t u8Tmp; \
11741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11742 (a_u16Dst) = (int8_t)u8Tmp; \
11743 } while (0)
11744# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11745 do { \
11746 uint8_t u8Tmp; \
11747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11748 (a_u32Dst) = (int8_t)u8Tmp; \
11749 } while (0)
11750# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11751 do { \
11752 uint8_t u8Tmp; \
11753 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11754 (a_u64Dst) = (int8_t)u8Tmp; \
11755 } while (0)
11756# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11757 do { \
11758 uint16_t u16Tmp; \
11759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11760 (a_u32Dst) = (int16_t)u16Tmp; \
11761 } while (0)
11762# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11763 do { \
11764 uint16_t u16Tmp; \
11765 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11766 (a_u64Dst) = (int16_t)u16Tmp; \
11767 } while (0)
11768# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11769 do { \
11770 uint32_t u32Tmp; \
11771 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11772 (a_u64Dst) = (int32_t)u32Tmp; \
11773 } while (0)
11774#else /* IEM_WITH_SETJMP */
11775# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11776 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11777# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11778 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11779# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11780 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11781# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11782 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11783# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11784 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11785# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11786 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11787#endif /* IEM_WITH_SETJMP */
11788
11789#ifndef IEM_WITH_SETJMP
11790# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11791 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11792# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11793 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11794# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11795 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11796# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11797 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11798#else
11799# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11800 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11801# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11802 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11803# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11804 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11805# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11806 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11807#endif
11808
11809#ifndef IEM_WITH_SETJMP
11810# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11811 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11812# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11813 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11814# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11815 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11816# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11817 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11818#else
11819# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11820 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11821# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11822 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11823# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11824 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11825# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11826 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11827#endif
11828
11829#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11830#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11831#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11832#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11833#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11834#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11835#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11836 do { \
11837 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11838 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11839 } while (0)
11840
11841#ifndef IEM_WITH_SETJMP
11842# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11843 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11844# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11845 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11846#else
11847# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11848 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11849# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11850 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11851#endif
11852
11853#ifndef IEM_WITH_SETJMP
11854# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11855 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11856# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11857 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11858#else
11859# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11860 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11861# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11862 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11863#endif
11864
11865
11866#define IEM_MC_PUSH_U16(a_u16Value) \
11867 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11868#define IEM_MC_PUSH_U32(a_u32Value) \
11869 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11870#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11871 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11872#define IEM_MC_PUSH_U64(a_u64Value) \
11873 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11874
11875#define IEM_MC_POP_U16(a_pu16Value) \
11876 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11877#define IEM_MC_POP_U32(a_pu32Value) \
11878 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11879#define IEM_MC_POP_U64(a_pu64Value) \
11880 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11881
11882/** Maps guest memory for direct or bounce buffered access.
11883 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11884 * @remarks May return.
11885 */
11886#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11887 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11888
11889/** Maps guest memory for direct or bounce buffered access.
11890 * The purpose is to pass the mapping to an operand implementation, hence the @a a_iArg parameter.
11891 * @remarks May return.
11892 */
11893#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11894 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11895
11896/** Commits the memory and unmaps the guest memory.
11897 * @remarks May return.
11898 */
11899#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11900 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11901
11902/** Commits the memory and unmaps the guest memory unless the FPU status word
11903 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11904 * would cause FLD not to store.
11905 *
11906 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11907 * store, while \#P will not.
11908 *
11909 * @remarks May in theory return - for now.
11910 */
11911#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11912 do { \
11913 if ( !(a_u16FSW & X86_FSW_ES) \
11914 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11915 & ~(IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11916 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11917 } while (0)
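
/*
 * Illustrative sketch (not part of the original source): a read-modify-write
 * memory operand is typically handled by bracketing the arithmetic worker call
 * with the map and commit macros above, roughly along these lines:
 *
 *      IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_CALL_VOID_AIMPL_3(pfnU16Worker, pu16Dst, u16Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 *
 * pu16Dst, u16Src, pEFlags, GCPtrEffDst and pfnU16Worker stand for arguments and
 * locals declared earlier in the MC block; the EFLAGS commit and RIP advance that
 * a complete block needs are omitted here.
 */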
11918
11919/** Calculate effective address from R/M. */
11920#ifndef IEM_WITH_SETJMP
11921# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11922 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11923#else
11924# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11925 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11926#endif
11927
11928#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11929#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11930#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11931#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11932#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11933#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11934#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11935
11936/**
11937 * Defers the rest of the instruction emulation to a C implementation routine
11938 * and returns, only taking the standard parameters.
11939 *
11940 * @param a_pfnCImpl The pointer to the C routine.
11941 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11942 */
11943#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11944
11945/**
11946 * Defers the rest of the instruction emulation to a C implementation routine and
11947 * returns, taking one argument in addition to the standard ones.
11948 *
11949 * @param a_pfnCImpl The pointer to the C routine.
11950 * @param a0 The argument.
11951 */
11952#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11953
11954/**
11955 * Defers the rest of the instruction emulation to a C implementation routine
11956 * and returns, taking two arguments in addition to the standard ones.
11957 *
11958 * @param a_pfnCImpl The pointer to the C routine.
11959 * @param a0 The first extra argument.
11960 * @param a1 The second extra argument.
11961 */
11962#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11963
11964/**
11965 * Defers the rest of the instruction emulation to a C implementation routine
11966 * and returns, taking three arguments in addition to the standard ones.
11967 *
11968 * @param a_pfnCImpl The pointer to the C routine.
11969 * @param a0 The first extra argument.
11970 * @param a1 The second extra argument.
11971 * @param a2 The third extra argument.
11972 */
11973#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11974
11975/**
11976 * Defers the rest of the instruction emulation to a C implementation routine
11977 * and returns, taking four arguments in addition to the standard ones.
11978 *
11979 * @param a_pfnCImpl The pointer to the C routine.
11980 * @param a0 The first extra argument.
11981 * @param a1 The second extra argument.
11982 * @param a2 The third extra argument.
11983 * @param a3 The fourth extra argument.
11984 */
11985#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11986
11987/**
11988 * Defers the rest of the instruction emulation to a C implementation routine
11989 * and returns, taking five arguments in addition to the standard ones.
11990 *
11991 * @param a_pfnCImpl The pointer to the C routine.
11992 * @param a0 The first extra argument.
11993 * @param a1 The second extra argument.
11994 * @param a2 The third extra argument.
11995 * @param a3 The fourth extra argument.
11996 * @param a4 The fifth extra argument.
11997 */
11998#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
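
/*
 * Illustrative sketch (not part of the original source): a typical MC block
 * handing the real work to a C implementation with two extra arguments looks
 * roughly like this (iemCImpl_SomeWorker is a placeholder name):
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(uint8_t, iEffSeg, 0);
 *      IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *      IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *      IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
 *      IEM_MC_CALL_CIMPL_2(iemCImpl_SomeWorker, iEffSeg, GCPtrEffSrc);
 *      IEM_MC_END();
 *
 * The worker itself would be declared with IEM_CIMPL_DEF_2 and receives pVCpu
 * and the instruction length in addition to the two visible arguments.
 */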
11999
12000/**
12001 * Defers the entire instruction emulation to a C implementation routine and
12002 * returns, only taking the standard parameters.
12003 *
12004 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12005 *
12006 * @param a_pfnCImpl The pointer to the C routine.
12007 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12008 */
12009#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12010
12011/**
12012 * Defers the entire instruction emulation to a C implementation routine and
12013 * returns, taking one argument in addition to the standard ones.
12014 *
12015 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12016 *
12017 * @param a_pfnCImpl The pointer to the C routine.
12018 * @param a0 The argument.
12019 */
12020#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12021
12022/**
12023 * Defers the entire instruction emulation to a C implementation routine and
12024 * returns, taking two arguments in addition to the standard ones.
12025 *
12026 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12027 *
12028 * @param a_pfnCImpl The pointer to the C routine.
12029 * @param a0 The first extra argument.
12030 * @param a1 The second extra argument.
12031 */
12032#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12033
12034/**
12035 * Defers the entire instruction emulation to a C implementation routine and
12036 * returns, taking three arguments in addition to the standard ones.
12037 *
12038 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12039 *
12040 * @param a_pfnCImpl The pointer to the C routine.
12041 * @param a0 The first extra argument.
12042 * @param a1 The second extra argument.
12043 * @param a2 The third extra argument.
12044 */
12045#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
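
/*
 * Illustrative sketch (not part of the original source): the DEFER_TO_CIMPL
 * variants are used straight from the decoder function, without an
 * IEM_MC_BEGIN/IEM_MC_END block, e.g. (iemOp_xyz and iemCImpl_xyz are placeholders):
 *
 *      FNIEMOP_DEF(iemOp_xyz)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_xyz);
 *      }
 */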
12046
12047/**
12048 * Calls a FPU assembly implementation taking one visible argument.
12049 *
12050 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12051 * @param a0 The first extra argument.
12052 */
12053#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12054 do { \
12055 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0)); \
12056 } while (0)
12057
12058/**
12059 * Calls a FPU assembly implementation taking two visible arguments.
12060 *
12061 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12062 * @param a0 The first extra argument.
12063 * @param a1 The second extra argument.
12064 */
12065#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12066 do { \
12067 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12068 } while (0)
12069
12070/**
12071 * Calls a FPU assembly implementation taking three visible arguments.
12072 *
12073 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12074 * @param a0 The first extra argument.
12075 * @param a1 The second extra argument.
12076 * @param a2 The third extra argument.
12077 */
12078#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12079 do { \
12080 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12081 } while (0)
12082
12083#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12084 do { \
12085 (a_FpuData).FSW = (a_FSW); \
12086 (a_FpuData).r80Result = *(a_pr80Value); \
12087 } while (0)
12088
12089/** Pushes FPU result onto the stack. */
12090#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12091 iemFpuPushResult(pVCpu, &a_FpuData)
12092/** Pushes FPU result onto the stack and sets the FPUDP. */
12093#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12094 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12095
12096/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12097#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12098 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12099
12100/** Stores FPU result in a stack register. */
12101#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12102 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12103/** Stores FPU result in a stack register and pops the stack. */
12104#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12105 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12106/** Stores FPU result in a stack register and sets the FPUDP. */
12107#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12108 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12109/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12110 * stack. */
12111#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12112 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
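
/*
 * Illustrative sketch (not part of the original source): the FPU call and
 * result macros are combined with the stack-check macros further down, roughly
 * the way the ST(0),ST(i) arithmetic forms do it (pfnR80ByR80Worker stands for
 * an iemAImpl_*_r80_by_r80 style worker):
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
 *      IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
 *      IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE();
 *      IEM_MC_MAYBE_RAISE_FPU_XCPT();
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, bRm & X86_MODRM_RM_MASK)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnR80ByR80Worker, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */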
12113
12114/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12115#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12116 iemFpuUpdateOpcodeAndIp(pVCpu)
12117/** Free a stack register (for FFREE and FFREEP). */
12118#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12119 iemFpuStackFree(pVCpu, a_iStReg)
12120/** Increment the FPU stack pointer. */
12121#define IEM_MC_FPU_STACK_INC_TOP() \
12122 iemFpuStackIncTop(pVCpu)
12123/** Decrement the FPU stack pointer. */
12124#define IEM_MC_FPU_STACK_DEC_TOP() \
12125 iemFpuStackDecTop(pVCpu)
12126
12127/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12128#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12129 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12130/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12131#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12132 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12133/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12134#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12135 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12136/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12137#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12138 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12139/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12140 * stack. */
12141#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12142 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12143/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12144#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12145 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12146
12147/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12148#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12149 iemFpuStackUnderflow(pVCpu, a_iStDst)
12150/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12151 * stack. */
12152#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12153 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12154/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12155 * FPUDS. */
12156#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12157 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12158/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12159 * FPUDS. Pops stack. */
12160#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12161 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12162/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12163 * stack twice. */
12164#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12165 iemFpuStackUnderflowThenPopPop(pVCpu)
12166/** Raises a FPU stack underflow exception for an instruction pushing a result
12167 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12168#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12169 iemFpuStackPushUnderflow(pVCpu)
12170/** Raises a FPU stack underflow exception for an instruction pushing a result
12171 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12172#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12173 iemFpuStackPushUnderflowTwo(pVCpu)
12174
12175/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12176 * FPUIP, FPUCS and FOP. */
12177#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12178 iemFpuStackPushOverflow(pVCpu)
12179/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12180 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12181#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12182 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12183/** Prepares for using the FPU state.
12184 * Ensures that we can use the host FPU in the current context (RC+R0).
12185 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12186#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12187/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
12188#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12189/** Actualizes the guest FPU state so it can be accessed and modified. */
12190#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12191
12192/** Prepares for using the SSE state.
12193 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12194 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12195#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12196/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12197#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12198/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12199#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12200
12201/** Prepares for using the AVX state.
12202 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12203 * Ensures the guest AVX state in the CPUMCTX is up to date.
12204 * @note This will include the AVX512 state too when support for it is added,
12205 * due to the zero-extending feature of VEX encoded instructions. */
12206#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12207/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12208#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12209/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12210#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12211
12212/**
12213 * Calls a MMX assembly implementation taking two visible arguments.
12214 *
12215 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12216 * @param a0 The first extra argument.
12217 * @param a1 The second extra argument.
12218 */
12219#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12220 do { \
12221 IEM_MC_PREPARE_FPU_USAGE(); \
12222 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12223 } while (0)
12224
12225/**
12226 * Calls a MMX assembly implementation taking three visible arguments.
12227 *
12228 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12229 * @param a0 The first extra argument.
12230 * @param a1 The second extra argument.
12231 * @param a2 The third extra argument.
12232 */
12233#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12234 do { \
12235 IEM_MC_PREPARE_FPU_USAGE(); \
12236 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12237 } while (0)
12238
12239
12240/**
12241 * Calls a SSE assembly implementation taking two visible arguments.
12242 *
12243 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12244 * @param a0 The first extra argument.
12245 * @param a1 The second extra argument.
12246 */
12247#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12248 do { \
12249 IEM_MC_PREPARE_SSE_USAGE(); \
12250 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1)); \
12251 } while (0)
12252
12253/**
12254 * Calls a SSE assembly implementation taking three visible arguments.
12255 *
12256 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12257 * @param a0 The first extra argument.
12258 * @param a1 The second extra argument.
12259 * @param a2 The third extra argument.
12260 */
12261#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12262 do { \
12263 IEM_MC_PREPARE_SSE_USAGE(); \
12264 a_pfnAImpl(&IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12265 } while (0)
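
/*
 * Illustrative sketch (not part of the original source): a register-form SSE
 * instruction typically raises the SSE-related exceptions, prepares the state
 * and then calls the worker on referenced XMM registers, roughly like this
 * (pfnU128Worker is a placeholder for an iemAImpl_*_u128 style worker):
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(PRTUINT128U, pDst, 0);
 *      IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *      IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *      IEM_MC_PREPARE_SSE_USAGE();
 *      IEM_MC_REF_XREG_U128(pDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK) | pVCpu->iem.s.uRexReg);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
 *      IEM_MC_CALL_SSE_AIMPL_2(pfnU128Worker, pDst, pSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */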
12266
12267
12268/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12269 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12270#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12271 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState), 0)
12272
12273/**
12274 * Calls an AVX assembly implementation taking two visible arguments.
12275 *
12276 * There is one implicit zero'th argument, a pointer to the extended state.
12277 *
12278 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12279 * @param a1 The first extra argument.
12280 * @param a2 The second extra argument.
12281 */
12282#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12283 do { \
12284 IEM_MC_PREPARE_AVX_USAGE(); \
12285 a_pfnAImpl(pXState, (a1), (a2)); \
12286 } while (0)
12287
12288/**
12289 * Calls an AVX assembly implementation taking three visible arguments.
12290 *
12291 * There is one implicit zero'th argument, a pointer to the extended state.
12292 *
12293 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12294 * @param a1 The first extra argument.
12295 * @param a2 The second extra argument.
12296 * @param a3 The third extra argument.
12297 */
12298#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12299 do { \
12300 IEM_MC_PREPARE_AVX_USAGE(); \
12301 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12302 } while (0)
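
/*
 * Illustrative sketch (not part of the original source): the AVX call macros
 * differ from the MMX/SSE ones in that the extended state pointer is passed as
 * an explicit argument 0, declared via IEM_MC_IMPLICIT_AVX_AIMPL_ARGS(), e.g.
 * (pfnAvxWorker is a placeholder):
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG(uint8_t, iYRegDst, 1);
 *      IEM_MC_ARG(uint8_t, iYRegSrc, 2);
 *      ... assign the register indexes from the ModR/M and VEX.vvvv fields ...
 *      IEM_MC_CALL_AVX_AIMPL_2(pfnAvxWorker, iYRegDst, iYRegSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */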
12303
12304/** @note Not for IOPL or IF testing. */
12305#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) {
12306/** @note Not for IOPL or IF testing. */
12307#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit))) {
12308/** @note Not for IOPL or IF testing. */
12309#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits)) {
12310/** @note Not for IOPL or IF testing. */
12311#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBits))) {
12312/** @note Not for IOPL or IF testing. */
12313#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12314 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12315 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12316/** @note Not for IOPL or IF testing. */
12317#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12318 if ( !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12319 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12320/** @note Not for IOPL or IF testing. */
12321#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12322 if ( (IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12323 || !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12324 != !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12325/** @note Not for IOPL or IF testing. */
12326#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12327 if ( !(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit)) \
12328 && !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit1)) \
12329 == !!(IEM_GET_CTX(pVCpu)->eflags.u & (a_fBit2)) ) {
12330#define IEM_MC_IF_CX_IS_NZ() if (IEM_GET_CTX(pVCpu)->cx != 0) {
12331#define IEM_MC_IF_ECX_IS_NZ() if (IEM_GET_CTX(pVCpu)->ecx != 0) {
12332#define IEM_MC_IF_RCX_IS_NZ() if (IEM_GET_CTX(pVCpu)->rcx != 0) {
12333/** @note Not for IOPL or IF testing. */
12334#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12335 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12336 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12337/** @note Not for IOPL or IF testing. */
12338#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12339 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12340 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12341/** @note Not for IOPL or IF testing. */
12342#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12343 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12344 && (IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12345/** @note Not for IOPL or IF testing. */
12346#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12347 if ( IEM_GET_CTX(pVCpu)->cx != 0 \
12348 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12349/** @note Not for IOPL or IF testing. */
12350#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12351 if ( IEM_GET_CTX(pVCpu)->ecx != 0 \
12352 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12353/** @note Not for IOPL or IF testing. */
12354#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12355 if ( IEM_GET_CTX(pVCpu)->rcx != 0 \
12356 && !(IEM_GET_CTX(pVCpu)->eflags.u & a_fBit)) {
12357#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12358#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12359
12360#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12361 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12362#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12363 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12364#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12365 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12366#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12367 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12368#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12369 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12370#define IEM_MC_IF_FCW_IM() \
12371 if (IEM_GET_CTX(pVCpu)->CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12372
12373#define IEM_MC_ELSE() } else {
12374#define IEM_MC_ENDIF() } do {} while (0)
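
/*
 * Illustrative sketch (not part of the original source): the EFLAGS/counter
 * test macros above open a brace which IEM_MC_ELSE/IEM_MC_ENDIF close again, so
 * a conditional branch reads like ordinary structured code, e.g. a JE/JZ style
 * handler:
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      IEM_MC_ELSE()
 *          IEM_MC_ADVANCE_RIP();
 *      IEM_MC_ENDIF();
 *      IEM_MC_END();
 *
 * i8Imm stands for the previously fetched signed 8-bit displacement.
 */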
12375
12376/** @} */
12377
12378
12379/** @name Opcode Debug Helpers.
12380 * @{
12381 */
12382#ifdef VBOX_WITH_STATISTICS
12383# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12384#else
12385# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12386#endif
12387
12388#ifdef DEBUG
12389# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12390 do { \
12391 IEMOP_INC_STATS(a_Stats); \
12392 Log4(("decode - %04x:%RGv %s%s [#%u]\n", IEM_GET_CTX(pVCpu)->cs.Sel, IEM_GET_CTX(pVCpu)->rip, \
12393 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12394 } while (0)
12395
12396# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12397 do { \
12398 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12399 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12400 (void)RT_CONCAT(OP_,a_Upper); \
12401 (void)(a_fDisHints); \
12402 (void)(a_fIemHints); \
12403 } while (0)
12404
12405# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12406 do { \
12407 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12408 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12409 (void)RT_CONCAT(OP_,a_Upper); \
12410 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12411 (void)(a_fDisHints); \
12412 (void)(a_fIemHints); \
12413 } while (0)
12414
12415# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12416 do { \
12417 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12418 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12419 (void)RT_CONCAT(OP_,a_Upper); \
12420 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12421 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12422 (void)(a_fDisHints); \
12423 (void)(a_fIemHints); \
12424 } while (0)
12425
12426# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12427 do { \
12428 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12429 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12430 (void)RT_CONCAT(OP_,a_Upper); \
12431 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12432 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12433 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12434 (void)(a_fDisHints); \
12435 (void)(a_fIemHints); \
12436 } while (0)
12437
12438# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12439 do { \
12440 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12441 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12442 (void)RT_CONCAT(OP_,a_Upper); \
12443 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12444 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12445 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12446 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12447 (void)(a_fDisHints); \
12448 (void)(a_fIemHints); \
12449 } while (0)
12450
12451#else
12452# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12453
12454# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12455 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12456# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12457 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12458# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12459 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12460# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12461 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12462# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12463 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12464
12465#endif
12466
12467#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12468 IEMOP_MNEMONIC0EX(a_Lower, \
12469 #a_Lower, \
12470 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12471#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12472 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12473 #a_Lower " " #a_Op1, \
12474 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12475#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12476 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12477 #a_Lower " " #a_Op1 "," #a_Op2, \
12478 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12479#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12480 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12481 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12482 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12483#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12484 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12485 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12486 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
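
/*
 * Illustrative sketch (not part of the original source): a decoder function
 * normally starts with one of the IEMOP_MNEMONICn macros, which bumps the
 * per-instruction statistics and, in debug builds, logs and validates the
 * decoding annotations, e.g.:
 *
 *      IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, IEMOPHINT_LOCK_ALLOWED);
 *
 * The form (MR), operands (Eb, Gb) and hint values shown here illustrate the
 * convention rather than quote a specific handler.
 */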
12487
12488/** @} */
12489
12490
12491/** @name Opcode Helpers.
12492 * @{
12493 */
12494
12495#ifdef IN_RING3
12496# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12497 do { \
12498 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12499 else \
12500 { \
12501 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12502 return IEMOP_RAISE_INVALID_OPCODE(); \
12503 } \
12504 } while (0)
12505#else
12506# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12507 do { \
12508 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12509 else return IEMOP_RAISE_INVALID_OPCODE(); \
12510 } while (0)
12511#endif
12512
12513/** The instruction requires a 186 or later. */
12514#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12515# define IEMOP_HLP_MIN_186() do { } while (0)
12516#else
12517# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12518#endif
12519
12520/** The instruction requires a 286 or later. */
12521#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12522# define IEMOP_HLP_MIN_286() do { } while (0)
12523#else
12524# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12525#endif
12526
12527/** The instruction requires a 386 or later. */
12528#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12529# define IEMOP_HLP_MIN_386() do { } while (0)
12530#else
12531# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12532#endif
12533
12534/** The instruction requires a 386 or later if the given expression is true. */
12535#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12536# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12537#else
12538# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12539#endif
12540
12541/** The instruction requires a 486 or later. */
12542#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12543# define IEMOP_HLP_MIN_486() do { } while (0)
12544#else
12545# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12546#endif
12547
12548/** The instruction requires a Pentium (586) or later. */
12549#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12550# define IEMOP_HLP_MIN_586() do { } while (0)
12551#else
12552# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12553#endif
12554
12555/** The instruction requires a PentiumPro (686) or later. */
12556#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12557# define IEMOP_HLP_MIN_686() do { } while (0)
12558#else
12559# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12560#endif
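
/*
 * Illustrative sketch (not part of the original source): an instruction that
 * was introduced with a later CPU generation places the matching check at the
 * start of its decoder so older target CPUs get #UD, e.g. for a hypothetical
 * 486+ opcode (the mnemonic name is a placeholder):
 *
 *      IEMOP_MNEMONIC(newinsn, "newinsn");
 *      IEMOP_HLP_MIN_486();
 *
 * When IEM_CFG_TARGET_CPU is fixed at compile time to a high enough value the
 * check compiles away entirely, as the definitions above show.
 */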
12561
12562
12563/** The instruction raises an \#UD in real and V8086 mode. */
12564#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12565 do \
12566 { \
12567 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12568 else return IEMOP_RAISE_INVALID_OPCODE(); \
12569 } while (0)
12570
12571/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12572 * 64-bit mode. */
12573#define IEMOP_HLP_NO_64BIT() \
12574 do \
12575 { \
12576 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12577 return IEMOP_RAISE_INVALID_OPCODE(); \
12578 } while (0)
12579
12580/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12581 * 64-bit mode. */
12582#define IEMOP_HLP_ONLY_64BIT() \
12583 do \
12584 { \
12585 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12586 return IEMOP_RAISE_INVALID_OPCODE(); \
12587 } while (0)
12588
12589/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12590#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12591 do \
12592 { \
12593 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12594 iemRecalEffOpSize64Default(pVCpu); \
12595 } while (0)
12596
12597/** The instruction has 64-bit operand size if 64-bit mode. */
12598#define IEMOP_HLP_64BIT_OP_SIZE() \
12599 do \
12600 { \
12601 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12602 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12603 } while (0)
12604
12605/** Only a REX prefix immediately preceding the first opcode byte takes
12606 * effect. This macro helps ensure this and logs bad guest code. */
12607#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12608 do \
12609 { \
12610 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12611 { \
12612 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", \
12613 IEM_GET_CTX(pVCpu)->rip, pVCpu->iem.s.fPrefixes)); \
12614 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12615 pVCpu->iem.s.uRexB = 0; \
12616 pVCpu->iem.s.uRexIndex = 0; \
12617 pVCpu->iem.s.uRexReg = 0; \
12618 iemRecalEffOpSize(pVCpu); \
12619 } \
12620 } while (0)
12621
12622/**
12623 * Done decoding.
12624 */
12625#define IEMOP_HLP_DONE_DECODING() \
12626 do \
12627 { \
12628 /*nothing for now, maybe later... */ \
12629 } while (0)
12630
12631/**
12632 * Done decoding, raise \#UD exception if lock prefix present.
12633 */
12634#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12635 do \
12636 { \
12637 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12638 { /* likely */ } \
12639 else \
12640 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12641 } while (0)
12642
12643
12644/**
12645 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12646 * repnz or size prefixes are present, or if in real or v8086 mode.
12647 */
12648#define IEMOP_HLP_DONE_VEX_DECODING() \
12649 do \
12650 { \
12651 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12652 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12653 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12654 { /* likely */ } \
12655 else \
12656 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12657 } while (0)
12658
12659/**
12660 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12661 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12662 */
12663#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12664 do \
12665 { \
12666 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12667 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12668 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12669 && pVCpu->iem.s.uVexLength == 0)) \
12670 { /* likely */ } \
12671 else \
12672 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12673 } while (0)
12674
12675
12676/**
12677 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12678 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12679 * register 0, or if in real or v8086 mode.
12680 */
12681#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12682 do \
12683 { \
12684 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12685 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12686 && !pVCpu->iem.s.uVex3rdReg \
12687 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12688 { /* likely */ } \
12689 else \
12690 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12691 } while (0)
12692
12693/**
12694 * Done decoding VEX, no V, L=0.
12695 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12696 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12697 */
12698#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12699 do \
12700 { \
12701 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12702 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12703 && pVCpu->iem.s.uVexLength == 0 \
12704 && pVCpu->iem.s.uVex3rdReg == 0 \
12705 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12706 { /* likely */ } \
12707 else \
12708 return IEMOP_RAISE_INVALID_OPCODE(); \
12709 } while (0)
12710
12711#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12712 do \
12713 { \
12714 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12715 { /* likely */ } \
12716 else \
12717 { \
12718 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12719 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12720 } \
12721 } while (0)
12722#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12723 do \
12724 { \
12725 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12726 { /* likely */ } \
12727 else \
12728 { \
12729 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12730 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12731 } \
12732 } while (0)
12733
12734/**
12735 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12736 * are present.
12737 */
12738#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12739 do \
12740 { \
12741 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12742 { /* likely */ } \
12743 else \
12744 return IEMOP_RAISE_INVALID_OPCODE(); \
12745 } while (0)
12746
12747
12748#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
12749/** Checks and handles the SVM nested-guest instruction intercept, updating
12750 * NRIP if needed. */
12751# define IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
12752 do \
12753 { \
12754 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
12755 { \
12756 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
12757 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
12758 } \
12759 } while (0)
12760
12761/** Checks and handles the SVM nested-guest CRx read intercept (x given by @a a_uCr). */
12762# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) \
12763 do \
12764 { \
12765 if (IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr)) \
12766 { \
12767 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
12768 IEM_RETURN_SVM_VMEXIT(a_pVCpu, SVM_EXIT_READ_CR0 + (a_uCr), a_uExitInfo1, a_uExitInfo2); \
12769 } \
12770 } while (0)
12771
12772#else /* !VBOX_WITH_NESTED_HWVIRT_SVM */
12773# define IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12774# define IEMOP_HLP_SVM_READ_CR_INTERCEPT(a_pVCpu, a_uCr, a_uExitInfo1, a_uExitInfo2) do { } while (0)
12775#endif /* !VBOX_WITH_NESTED_HWVIRT_SVM */
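
/*
 * Illustrative sketch (not part of the original source): an instruction that can
 * be intercepted by an SVM nested-guest hypervisor invokes the helper once
 * decoding is complete, with the intercept bit and exit code of that particular
 * instruction (the XXX names below are placeholders):
 *
 *      IEMOP_HLP_SVM_INSTR_INTERCEPT_AND_NRIP(pVCpu, SVM_CTRL_INTERCEPT_XXX, SVM_EXIT_XXX, 0, 0);
 *
 * If the intercept is active the macro updates the next-RIP field and performs
 * the #VMEXIT; otherwise it falls through and emulation continues normally.
 * Without VBOX_WITH_NESTED_HWVIRT_SVM the macro expands to nothing.
 */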
12776
12777
12778/**
12779 * Calculates the effective address of a ModR/M memory operand.
12780 *
12781 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12782 *
12783 * @return Strict VBox status code.
12784 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12785 * @param bRm The ModRM byte.
12786 * @param cbImm The size of any immediate following the
12787 * effective address opcode bytes. Important for
12788 * RIP relative addressing.
12789 * @param pGCPtrEff Where to return the effective address.
12790 */
12791IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12792{
12793 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12794 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
12795# define SET_SS_DEF() \
12796 do \
12797 { \
12798 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12799 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12800 } while (0)
12801
12802 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12803 {
12804/** @todo Check the effective address size crap! */
12805 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12806 {
12807 uint16_t u16EffAddr;
12808
12809 /* Handle the disp16 form with no registers first. */
12810 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12811 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12812 else
12813 {
12814            /* Get the displacement. */
12815 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12816 {
12817 case 0: u16EffAddr = 0; break;
12818 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12819 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12820 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12821 }
12822
12823 /* Add the base and index registers to the disp. */
12824 switch (bRm & X86_MODRM_RM_MASK)
12825 {
12826 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
12827 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
12828 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
12829 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
12830 case 4: u16EffAddr += pCtx->si; break;
12831 case 5: u16EffAddr += pCtx->di; break;
12832 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
12833 case 7: u16EffAddr += pCtx->bx; break;
12834 }
12835 }
12836
12837 *pGCPtrEff = u16EffAddr;
12838 }
12839 else
12840 {
12841 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12842 uint32_t u32EffAddr;
12843
12844 /* Handle the disp32 form with no registers first. */
12845 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12846 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12847 else
12848 {
12849 /* Get the register (or SIB) value. */
12850 switch ((bRm & X86_MODRM_RM_MASK))
12851 {
12852 case 0: u32EffAddr = pCtx->eax; break;
12853 case 1: u32EffAddr = pCtx->ecx; break;
12854 case 2: u32EffAddr = pCtx->edx; break;
12855 case 3: u32EffAddr = pCtx->ebx; break;
12856 case 4: /* SIB */
12857 {
12858 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12859
12860 /* Get the index and scale it. */
12861 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12862 {
12863 case 0: u32EffAddr = pCtx->eax; break;
12864 case 1: u32EffAddr = pCtx->ecx; break;
12865 case 2: u32EffAddr = pCtx->edx; break;
12866 case 3: u32EffAddr = pCtx->ebx; break;
12867 case 4: u32EffAddr = 0; /*none */ break;
12868 case 5: u32EffAddr = pCtx->ebp; break;
12869 case 6: u32EffAddr = pCtx->esi; break;
12870 case 7: u32EffAddr = pCtx->edi; break;
12871 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12872 }
12873 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12874
12875 /* add base */
12876 switch (bSib & X86_SIB_BASE_MASK)
12877 {
12878 case 0: u32EffAddr += pCtx->eax; break;
12879 case 1: u32EffAddr += pCtx->ecx; break;
12880 case 2: u32EffAddr += pCtx->edx; break;
12881 case 3: u32EffAddr += pCtx->ebx; break;
12882 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
12883 case 5:
12884 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12885 {
12886 u32EffAddr += pCtx->ebp;
12887 SET_SS_DEF();
12888 }
12889 else
12890 {
12891 uint32_t u32Disp;
12892 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12893 u32EffAddr += u32Disp;
12894 }
12895 break;
12896 case 6: u32EffAddr += pCtx->esi; break;
12897 case 7: u32EffAddr += pCtx->edi; break;
12898 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12899 }
12900 break;
12901 }
12902 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
12903 case 6: u32EffAddr = pCtx->esi; break;
12904 case 7: u32EffAddr = pCtx->edi; break;
12905 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12906 }
12907
12908 /* Get and add the displacement. */
12909 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12910 {
12911 case 0:
12912 break;
12913 case 1:
12914 {
12915 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12916 u32EffAddr += i8Disp;
12917 break;
12918 }
12919 case 2:
12920 {
12921 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12922 u32EffAddr += u32Disp;
12923 break;
12924 }
12925 default:
12926 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12927 }
12928
12929 }
12930 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12931 *pGCPtrEff = u32EffAddr;
12932 else
12933 {
12934 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12935 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12936 }
12937 }
12938 }
12939 else
12940 {
12941 uint64_t u64EffAddr;
12942
12943 /* Handle the rip+disp32 form with no registers first. */
12944 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12945 {
12946 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12947 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12948 }
12949 else
12950 {
12951 /* Get the register (or SIB) value. */
12952 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12953 {
12954 case 0: u64EffAddr = pCtx->rax; break;
12955 case 1: u64EffAddr = pCtx->rcx; break;
12956 case 2: u64EffAddr = pCtx->rdx; break;
12957 case 3: u64EffAddr = pCtx->rbx; break;
12958 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
12959 case 6: u64EffAddr = pCtx->rsi; break;
12960 case 7: u64EffAddr = pCtx->rdi; break;
12961 case 8: u64EffAddr = pCtx->r8; break;
12962 case 9: u64EffAddr = pCtx->r9; break;
12963 case 10: u64EffAddr = pCtx->r10; break;
12964 case 11: u64EffAddr = pCtx->r11; break;
12965 case 13: u64EffAddr = pCtx->r13; break;
12966 case 14: u64EffAddr = pCtx->r14; break;
12967 case 15: u64EffAddr = pCtx->r15; break;
12968 /* SIB */
12969 case 4:
12970 case 12:
12971 {
12972 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12973
12974 /* Get the index and scale it. */
12975 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12976 {
12977 case 0: u64EffAddr = pCtx->rax; break;
12978 case 1: u64EffAddr = pCtx->rcx; break;
12979 case 2: u64EffAddr = pCtx->rdx; break;
12980 case 3: u64EffAddr = pCtx->rbx; break;
12981 case 4: u64EffAddr = 0; /*none */ break;
12982 case 5: u64EffAddr = pCtx->rbp; break;
12983 case 6: u64EffAddr = pCtx->rsi; break;
12984 case 7: u64EffAddr = pCtx->rdi; break;
12985 case 8: u64EffAddr = pCtx->r8; break;
12986 case 9: u64EffAddr = pCtx->r9; break;
12987 case 10: u64EffAddr = pCtx->r10; break;
12988 case 11: u64EffAddr = pCtx->r11; break;
12989 case 12: u64EffAddr = pCtx->r12; break;
12990 case 13: u64EffAddr = pCtx->r13; break;
12991 case 14: u64EffAddr = pCtx->r14; break;
12992 case 15: u64EffAddr = pCtx->r15; break;
12993 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12994 }
12995 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12996
12997 /* add base */
12998 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12999 {
13000 case 0: u64EffAddr += pCtx->rax; break;
13001 case 1: u64EffAddr += pCtx->rcx; break;
13002 case 2: u64EffAddr += pCtx->rdx; break;
13003 case 3: u64EffAddr += pCtx->rbx; break;
13004 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13005 case 6: u64EffAddr += pCtx->rsi; break;
13006 case 7: u64EffAddr += pCtx->rdi; break;
13007 case 8: u64EffAddr += pCtx->r8; break;
13008 case 9: u64EffAddr += pCtx->r9; break;
13009 case 10: u64EffAddr += pCtx->r10; break;
13010 case 11: u64EffAddr += pCtx->r11; break;
13011 case 12: u64EffAddr += pCtx->r12; break;
13012 case 14: u64EffAddr += pCtx->r14; break;
13013 case 15: u64EffAddr += pCtx->r15; break;
13014 /* complicated encodings */
13015 case 5:
13016 case 13:
13017 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13018 {
13019 if (!pVCpu->iem.s.uRexB)
13020 {
13021 u64EffAddr += pCtx->rbp;
13022 SET_SS_DEF();
13023 }
13024 else
13025 u64EffAddr += pCtx->r13;
13026 }
13027 else
13028 {
13029 uint32_t u32Disp;
13030 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13031 u64EffAddr += (int32_t)u32Disp;
13032 }
13033 break;
13034 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13035 }
13036 break;
13037 }
13038 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13039 }
13040
13041 /* Get and add the displacement. */
13042 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13043 {
13044 case 0:
13045 break;
13046 case 1:
13047 {
13048 int8_t i8Disp;
13049 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13050 u64EffAddr += i8Disp;
13051 break;
13052 }
13053 case 2:
13054 {
13055 uint32_t u32Disp;
13056 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13057 u64EffAddr += (int32_t)u32Disp;
13058 break;
13059 }
13060 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13061 }
13062
13063 }
13064
13065 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13066 *pGCPtrEff = u64EffAddr;
13067 else
13068 {
13069 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13070 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13071 }
13072 }
13073
13074 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13075 return VINF_SUCCESS;
13076}
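
/*
 * Worked example (illustrative, not part of the original source): with 32-bit
 * addressing, bRm=0x44 decodes to mod=1, reg=0, rm=4, i.e. a SIB byte followed
 * by an 8-bit displacement.  Given bSib=0x88 (scale=2, index=ECX, base=EAX) and
 * a displacement byte of 0x10, the code above yields
 *
 *      GCPtrEff = (ECX << 2) + EAX + 0x10
 *
 * with the default DS segment, since neither EBP nor ESP is involved as a base
 * and SET_SS_DEF() is therefore never invoked.
 */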
13077
13078
13079/**
13080 * Calculates the effective address of a ModR/M memory operand.
13081 *
13082 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13083 *
13084 * @return Strict VBox status code.
13085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13086 * @param bRm The ModRM byte.
13087 * @param cbImm The size of any immediate following the
13088 * effective address opcode bytes. Important for
13089 * RIP relative addressing.
13090 * @param pGCPtrEff Where to return the effective address.
13091 * @param offRsp RSP displacement.
13092 */
13093IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13094{
13095    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13096 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13097# define SET_SS_DEF() \
13098 do \
13099 { \
13100 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13101 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13102 } while (0)
13103
13104 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13105 {
13106/** @todo Check the effective address size crap! */
13107 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13108 {
13109 uint16_t u16EffAddr;
13110
13111 /* Handle the disp16 form with no registers first. */
13112 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13113 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13114 else
13115 {
13116            /* Get the displacement. */
13117 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13118 {
13119 case 0: u16EffAddr = 0; break;
13120 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13121 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13122 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13123 }
13124
13125 /* Add the base and index registers to the disp. */
13126 switch (bRm & X86_MODRM_RM_MASK)
13127 {
13128 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13129 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13130 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13131 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13132 case 4: u16EffAddr += pCtx->si; break;
13133 case 5: u16EffAddr += pCtx->di; break;
13134 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13135 case 7: u16EffAddr += pCtx->bx; break;
13136 }
13137 }
13138
13139 *pGCPtrEff = u16EffAddr;
13140 }
13141 else
13142 {
13143 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13144 uint32_t u32EffAddr;
13145
13146 /* Handle the disp32 form with no registers first. */
13147 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13148 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13149 else
13150 {
13151 /* Get the register (or SIB) value. */
13152 switch ((bRm & X86_MODRM_RM_MASK))
13153 {
13154 case 0: u32EffAddr = pCtx->eax; break;
13155 case 1: u32EffAddr = pCtx->ecx; break;
13156 case 2: u32EffAddr = pCtx->edx; break;
13157 case 3: u32EffAddr = pCtx->ebx; break;
13158 case 4: /* SIB */
13159 {
13160 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13161
13162 /* Get the index and scale it. */
13163 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13164 {
13165 case 0: u32EffAddr = pCtx->eax; break;
13166 case 1: u32EffAddr = pCtx->ecx; break;
13167 case 2: u32EffAddr = pCtx->edx; break;
13168 case 3: u32EffAddr = pCtx->ebx; break;
13169 case 4: u32EffAddr = 0; /*none */ break;
13170 case 5: u32EffAddr = pCtx->ebp; break;
13171 case 6: u32EffAddr = pCtx->esi; break;
13172 case 7: u32EffAddr = pCtx->edi; break;
13173 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13174 }
13175 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13176
13177 /* add base */
13178 switch (bSib & X86_SIB_BASE_MASK)
13179 {
13180 case 0: u32EffAddr += pCtx->eax; break;
13181 case 1: u32EffAddr += pCtx->ecx; break;
13182 case 2: u32EffAddr += pCtx->edx; break;
13183 case 3: u32EffAddr += pCtx->ebx; break;
13184 case 4:
13185 u32EffAddr += pCtx->esp + offRsp;
13186 SET_SS_DEF();
13187 break;
13188 case 5:
13189 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13190 {
13191 u32EffAddr += pCtx->ebp;
13192 SET_SS_DEF();
13193 }
13194 else
13195 {
13196 uint32_t u32Disp;
13197 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13198 u32EffAddr += u32Disp;
13199 }
13200 break;
13201 case 6: u32EffAddr += pCtx->esi; break;
13202 case 7: u32EffAddr += pCtx->edi; break;
13203 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13204 }
13205 break;
13206 }
13207 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13208 case 6: u32EffAddr = pCtx->esi; break;
13209 case 7: u32EffAddr = pCtx->edi; break;
13210 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13211 }
13212
13213 /* Get and add the displacement. */
13214 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13215 {
13216 case 0:
13217 break;
13218 case 1:
13219 {
13220 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13221 u32EffAddr += i8Disp;
13222 break;
13223 }
13224 case 2:
13225 {
13226 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13227 u32EffAddr += u32Disp;
13228 break;
13229 }
13230 default:
13231 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13232 }
13233
13234 }
13235 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13236 *pGCPtrEff = u32EffAddr;
13237 else
13238 {
13239 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13240 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13241 }
13242 }
13243 }
13244 else
13245 {
13246 uint64_t u64EffAddr;
13247
13248 /* Handle the rip+disp32 form with no registers first. */
13249 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13250 {
13251 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13252 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13253 }
13254 else
13255 {
13256 /* Get the register (or SIB) value. */
13257 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13258 {
13259 case 0: u64EffAddr = pCtx->rax; break;
13260 case 1: u64EffAddr = pCtx->rcx; break;
13261 case 2: u64EffAddr = pCtx->rdx; break;
13262 case 3: u64EffAddr = pCtx->rbx; break;
13263 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13264 case 6: u64EffAddr = pCtx->rsi; break;
13265 case 7: u64EffAddr = pCtx->rdi; break;
13266 case 8: u64EffAddr = pCtx->r8; break;
13267 case 9: u64EffAddr = pCtx->r9; break;
13268 case 10: u64EffAddr = pCtx->r10; break;
13269 case 11: u64EffAddr = pCtx->r11; break;
13270 case 13: u64EffAddr = pCtx->r13; break;
13271 case 14: u64EffAddr = pCtx->r14; break;
13272 case 15: u64EffAddr = pCtx->r15; break;
13273 /* SIB */
13274 case 4:
13275 case 12:
13276 {
13277 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13278
13279 /* Get the index and scale it. */
13280 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13281 {
13282 case 0: u64EffAddr = pCtx->rax; break;
13283 case 1: u64EffAddr = pCtx->rcx; break;
13284 case 2: u64EffAddr = pCtx->rdx; break;
13285 case 3: u64EffAddr = pCtx->rbx; break;
13286 case 4: u64EffAddr = 0; /*none */ break;
13287 case 5: u64EffAddr = pCtx->rbp; break;
13288 case 6: u64EffAddr = pCtx->rsi; break;
13289 case 7: u64EffAddr = pCtx->rdi; break;
13290 case 8: u64EffAddr = pCtx->r8; break;
13291 case 9: u64EffAddr = pCtx->r9; break;
13292 case 10: u64EffAddr = pCtx->r10; break;
13293 case 11: u64EffAddr = pCtx->r11; break;
13294 case 12: u64EffAddr = pCtx->r12; break;
13295 case 13: u64EffAddr = pCtx->r13; break;
13296 case 14: u64EffAddr = pCtx->r14; break;
13297 case 15: u64EffAddr = pCtx->r15; break;
13298 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13299 }
13300 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13301
13302 /* add base */
13303 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13304 {
13305 case 0: u64EffAddr += pCtx->rax; break;
13306 case 1: u64EffAddr += pCtx->rcx; break;
13307 case 2: u64EffAddr += pCtx->rdx; break;
13308 case 3: u64EffAddr += pCtx->rbx; break;
13309 case 4: u64EffAddr += pCtx->rsp + offRsp; SET_SS_DEF(); break;
13310 case 6: u64EffAddr += pCtx->rsi; break;
13311 case 7: u64EffAddr += pCtx->rdi; break;
13312 case 8: u64EffAddr += pCtx->r8; break;
13313 case 9: u64EffAddr += pCtx->r9; break;
13314 case 10: u64EffAddr += pCtx->r10; break;
13315 case 11: u64EffAddr += pCtx->r11; break;
13316 case 12: u64EffAddr += pCtx->r12; break;
13317 case 14: u64EffAddr += pCtx->r14; break;
13318 case 15: u64EffAddr += pCtx->r15; break;
13319 /* complicated encodings */
13320 case 5:
13321 case 13:
13322 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13323 {
13324 if (!pVCpu->iem.s.uRexB)
13325 {
13326 u64EffAddr += pCtx->rbp;
13327 SET_SS_DEF();
13328 }
13329 else
13330 u64EffAddr += pCtx->r13;
13331 }
13332 else
13333 {
13334 uint32_t u32Disp;
13335 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13336 u64EffAddr += (int32_t)u32Disp;
13337 }
13338 break;
13339 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13340 }
13341 break;
13342 }
13343 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13344 }
13345
13346 /* Get and add the displacement. */
13347 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13348 {
13349 case 0:
13350 break;
13351 case 1:
13352 {
13353 int8_t i8Disp;
13354 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13355 u64EffAddr += i8Disp;
13356 break;
13357 }
13358 case 2:
13359 {
13360 uint32_t u32Disp;
13361 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13362 u64EffAddr += (int32_t)u32Disp;
13363 break;
13364 }
13365 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13366 }
13367
13368 }
13369
13370 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13371 *pGCPtrEff = u64EffAddr;
13372 else
13373 {
13374 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13375 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13376 }
13377 }
13378
13379 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13380 return VINF_SUCCESS;
13381}
13382
13383
13384#ifdef IEM_WITH_SETJMP
13385/**
13386 * Calculates the effective address of a ModR/M memory operand.
13387 *
13388 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13389 *
13390 * May longjmp on internal error.
13391 *
13392 * @return The effective address.
13393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13394 * @param bRm The ModRM byte.
13395 * @param cbImm The size of any immediate following the
13396 * effective address opcode bytes. Important for
13397 * RIP relative addressing.
13398 */
13399IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13400{
13401 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13402 PCCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13403# define SET_SS_DEF() \
13404 do \
13405 { \
13406 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13407 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13408 } while (0)
13409
13410 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13411 {
13412/** @todo Check the effective address size crap! */
13413 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13414 {
13415 uint16_t u16EffAddr;
13416
13417 /* Handle the disp16 form with no registers first. */
13418 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13419 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13420 else
13421 {
13422 /* Get the displacement. */
13423 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13424 {
13425 case 0: u16EffAddr = 0; break;
13426 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13427 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13428 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13429 }
13430
13431 /* Add the base and index registers to the disp. */
13432 switch (bRm & X86_MODRM_RM_MASK)
13433 {
13434 case 0: u16EffAddr += pCtx->bx + pCtx->si; break;
13435 case 1: u16EffAddr += pCtx->bx + pCtx->di; break;
13436 case 2: u16EffAddr += pCtx->bp + pCtx->si; SET_SS_DEF(); break;
13437 case 3: u16EffAddr += pCtx->bp + pCtx->di; SET_SS_DEF(); break;
13438 case 4: u16EffAddr += pCtx->si; break;
13439 case 5: u16EffAddr += pCtx->di; break;
13440 case 6: u16EffAddr += pCtx->bp; SET_SS_DEF(); break;
13441 case 7: u16EffAddr += pCtx->bx; break;
13442 }
13443 }
13444
13445 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13446 return u16EffAddr;
13447 }
13448
13449 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13450 uint32_t u32EffAddr;
13451
13452 /* Handle the disp32 form with no registers first. */
13453 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13454 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13455 else
13456 {
13457 /* Get the register (or SIB) value. */
13458 switch ((bRm & X86_MODRM_RM_MASK))
13459 {
13460 case 0: u32EffAddr = pCtx->eax; break;
13461 case 1: u32EffAddr = pCtx->ecx; break;
13462 case 2: u32EffAddr = pCtx->edx; break;
13463 case 3: u32EffAddr = pCtx->ebx; break;
13464 case 4: /* SIB */
13465 {
13466 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13467
13468 /* Get the index and scale it. */
13469 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13470 {
13471 case 0: u32EffAddr = pCtx->eax; break;
13472 case 1: u32EffAddr = pCtx->ecx; break;
13473 case 2: u32EffAddr = pCtx->edx; break;
13474 case 3: u32EffAddr = pCtx->ebx; break;
13475 case 4: u32EffAddr = 0; /*none */ break;
13476 case 5: u32EffAddr = pCtx->ebp; break;
13477 case 6: u32EffAddr = pCtx->esi; break;
13478 case 7: u32EffAddr = pCtx->edi; break;
13479 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13480 }
13481 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13482
13483 /* add base */
13484 switch (bSib & X86_SIB_BASE_MASK)
13485 {
13486 case 0: u32EffAddr += pCtx->eax; break;
13487 case 1: u32EffAddr += pCtx->ecx; break;
13488 case 2: u32EffAddr += pCtx->edx; break;
13489 case 3: u32EffAddr += pCtx->ebx; break;
13490 case 4: u32EffAddr += pCtx->esp; SET_SS_DEF(); break;
13491 case 5:
13492 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13493 {
13494 u32EffAddr += pCtx->ebp;
13495 SET_SS_DEF();
13496 }
13497 else
13498 {
13499 uint32_t u32Disp;
13500 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13501 u32EffAddr += u32Disp;
13502 }
13503 break;
13504 case 6: u32EffAddr += pCtx->esi; break;
13505 case 7: u32EffAddr += pCtx->edi; break;
13506 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13507 }
13508 break;
13509 }
13510 case 5: u32EffAddr = pCtx->ebp; SET_SS_DEF(); break;
13511 case 6: u32EffAddr = pCtx->esi; break;
13512 case 7: u32EffAddr = pCtx->edi; break;
13513 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13514 }
13515
13516 /* Get and add the displacement. */
13517 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13518 {
13519 case 0:
13520 break;
13521 case 1:
13522 {
13523 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13524 u32EffAddr += i8Disp;
13525 break;
13526 }
13527 case 2:
13528 {
13529 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13530 u32EffAddr += u32Disp;
13531 break;
13532 }
13533 default:
13534 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13535 }
13536 }
13537
13538 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13539 {
13540 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13541 return u32EffAddr;
13542 }
13543 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13544 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13545 return u32EffAddr & UINT16_MAX;
13546 }
13547
13548 uint64_t u64EffAddr;
13549
13550 /* Handle the rip+disp32 form with no registers first. */
13551 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13552 {
13553 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13554 u64EffAddr += pCtx->rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13555 }
13556 else
13557 {
13558 /* Get the register (or SIB) value. */
13559 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13560 {
13561 case 0: u64EffAddr = pCtx->rax; break;
13562 case 1: u64EffAddr = pCtx->rcx; break;
13563 case 2: u64EffAddr = pCtx->rdx; break;
13564 case 3: u64EffAddr = pCtx->rbx; break;
13565 case 5: u64EffAddr = pCtx->rbp; SET_SS_DEF(); break;
13566 case 6: u64EffAddr = pCtx->rsi; break;
13567 case 7: u64EffAddr = pCtx->rdi; break;
13568 case 8: u64EffAddr = pCtx->r8; break;
13569 case 9: u64EffAddr = pCtx->r9; break;
13570 case 10: u64EffAddr = pCtx->r10; break;
13571 case 11: u64EffAddr = pCtx->r11; break;
13572 case 13: u64EffAddr = pCtx->r13; break;
13573 case 14: u64EffAddr = pCtx->r14; break;
13574 case 15: u64EffAddr = pCtx->r15; break;
13575 /* SIB */
13576 case 4:
13577 case 12:
13578 {
13579 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13580
13581 /* Get the index and scale it. */
13582 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13583 {
13584 case 0: u64EffAddr = pCtx->rax; break;
13585 case 1: u64EffAddr = pCtx->rcx; break;
13586 case 2: u64EffAddr = pCtx->rdx; break;
13587 case 3: u64EffAddr = pCtx->rbx; break;
13588 case 4: u64EffAddr = 0; /*none */ break;
13589 case 5: u64EffAddr = pCtx->rbp; break;
13590 case 6: u64EffAddr = pCtx->rsi; break;
13591 case 7: u64EffAddr = pCtx->rdi; break;
13592 case 8: u64EffAddr = pCtx->r8; break;
13593 case 9: u64EffAddr = pCtx->r9; break;
13594 case 10: u64EffAddr = pCtx->r10; break;
13595 case 11: u64EffAddr = pCtx->r11; break;
13596 case 12: u64EffAddr = pCtx->r12; break;
13597 case 13: u64EffAddr = pCtx->r13; break;
13598 case 14: u64EffAddr = pCtx->r14; break;
13599 case 15: u64EffAddr = pCtx->r15; break;
13600 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13601 }
13602 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13603
13604 /* add base */
13605 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13606 {
13607 case 0: u64EffAddr += pCtx->rax; break;
13608 case 1: u64EffAddr += pCtx->rcx; break;
13609 case 2: u64EffAddr += pCtx->rdx; break;
13610 case 3: u64EffAddr += pCtx->rbx; break;
13611 case 4: u64EffAddr += pCtx->rsp; SET_SS_DEF(); break;
13612 case 6: u64EffAddr += pCtx->rsi; break;
13613 case 7: u64EffAddr += pCtx->rdi; break;
13614 case 8: u64EffAddr += pCtx->r8; break;
13615 case 9: u64EffAddr += pCtx->r9; break;
13616 case 10: u64EffAddr += pCtx->r10; break;
13617 case 11: u64EffAddr += pCtx->r11; break;
13618 case 12: u64EffAddr += pCtx->r12; break;
13619 case 14: u64EffAddr += pCtx->r14; break;
13620 case 15: u64EffAddr += pCtx->r15; break;
13621 /* complicated encodings */
13622 case 5:
13623 case 13:
13624 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13625 {
13626 if (!pVCpu->iem.s.uRexB)
13627 {
13628 u64EffAddr += pCtx->rbp;
13629 SET_SS_DEF();
13630 }
13631 else
13632 u64EffAddr += pCtx->r13;
13633 }
13634 else
13635 {
13636 uint32_t u32Disp;
13637 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13638 u64EffAddr += (int32_t)u32Disp;
13639 }
13640 break;
13641 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13642 }
13643 break;
13644 }
13645 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13646 }
13647
13648 /* Get and add the displacement. */
13649 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13650 {
13651 case 0:
13652 break;
13653 case 1:
13654 {
13655 int8_t i8Disp;
13656 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13657 u64EffAddr += i8Disp;
13658 break;
13659 }
13660 case 2:
13661 {
13662 uint32_t u32Disp;
13663 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13664 u64EffAddr += (int32_t)u32Disp;
13665 break;
13666 }
13667 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13668 }
13669
13670 }
13671
13672 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13673 {
13674 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13675 return u64EffAddr;
13676 }
13677 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13678 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13679 return u64EffAddr & UINT32_MAX;
13680}
13681#endif /* IEM_WITH_SETJMP */
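/*
 * Worked example for the helpers above, 32-bit addressing: the instruction
 * bytes 8B 44 8D 10 (mov eax, [ebp+ecx*4+0x10]).  ModRM = 0x44 gives mod=01,
 * reg=000, rm=100; rm=4 selects a SIB byte.  SIB = 0x8D gives scale=10 (x4),
 * index=001 (ecx) and base=101, which with mod!=0 means ebp.  mod=01 adds an
 * 8-bit displacement, here 0x10.  The result is EffAddr = ebp + ecx*4 + 0x10,
 * and because the base is ebp, SET_SS_DEF() makes SS the default segment.
 */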
13682
13683/** @} */
13684
13685
13686
13687/*
13688 * Include the instructions
13689 */
13690#include "IEMAllInstructions.cpp.h"
13691
13692
13693
13694#ifdef LOG_ENABLED
13695/**
13696 * Logs the current instruction.
13697 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13698 * @param pCtx The current CPU context.
13699 * @param fSameCtx Set if we have the same context information as the VMM,
13700 * clear if we may have already executed an instruction in
13701 * our debug context. When clear, we assume IEMCPU holds
13702 * valid CPU mode info.
13703 */
13704IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, PCPUMCTX pCtx, bool fSameCtx)
13705{
13706# ifdef IN_RING3
13707 if (LogIs2Enabled())
13708 {
13709 char szInstr[256];
13710 uint32_t cbInstr = 0;
13711 if (fSameCtx)
13712 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13713 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13714 szInstr, sizeof(szInstr), &cbInstr);
13715 else
13716 {
13717 uint32_t fFlags = 0;
13718 switch (pVCpu->iem.s.enmCpuMode)
13719 {
13720 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13721 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13722 case IEMMODE_16BIT:
13723 if (!(pCtx->cr0 & X86_CR0_PE) || pCtx->eflags.Bits.u1VM)
13724 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13725 else
13726 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13727 break;
13728 }
13729 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pCtx->cs.Sel, pCtx->rip, fFlags,
13730 szInstr, sizeof(szInstr), &cbInstr);
13731 }
13732
13733 PCX86FXSTATE pFpuCtx = &pCtx->CTX_SUFF(pXState)->x87;
13734 Log2(("****\n"
13735 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13736 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13737 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13738 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13739 " %s\n"
13740 ,
13741 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
13742 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.Bits.u2IOPL, pCtx->tr.Sel,
13743 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel,
13744 pCtx->fs.Sel, pCtx->gs.Sel, pCtx->eflags.u,
13745 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13746 szInstr));
13747
13748 if (LogIs3Enabled())
13749 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13750 }
13751 else
13752# endif
13753 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n",
13754 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u));
13755 RT_NOREF_PV(pVCpu); RT_NOREF_PV(pCtx); RT_NOREF_PV(fSameCtx);
13756}
13757#endif /* LOG_ENABLED */
13758
13759
13760/**
13761 * Makes status code adjustments (pass up from I/O and access handler)
13762 * as well as maintaining statistics.
13763 *
13764 * @returns Strict VBox status code to pass up.
13765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13766 * @param rcStrict The status from executing an instruction.
13767 */
13768DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13769{
13770 if (rcStrict != VINF_SUCCESS)
13771 {
13772 if (RT_SUCCESS(rcStrict))
13773 {
13774 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13775 || rcStrict == VINF_IOM_R3_IOPORT_READ
13776 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13777 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13778 || rcStrict == VINF_IOM_R3_MMIO_READ
13779 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13780 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13781 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13782 || rcStrict == VINF_CPUM_R3_MSR_READ
13783 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13784 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13785 || rcStrict == VINF_EM_RAW_TO_R3
13786 || rcStrict == VINF_EM_RAW_EMULATE_IO_BLOCK
13787 || rcStrict == VINF_EM_TRIPLE_FAULT
13788 /* raw-mode / virt handlers only: */
13789 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13790 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13791 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13792 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13793 || rcStrict == VINF_SELM_SYNC_GDT
13794 || rcStrict == VINF_CSAM_PENDING_ACTION
13795 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13796 /* nested hw.virt codes: */
13797 || rcStrict == VINF_SVM_VMEXIT
13798 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13799/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13800 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13801#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13802 if ( rcStrict == VINF_SVM_VMEXIT
13803 && rcPassUp == VINF_SUCCESS)
13804 rcStrict = VINF_SUCCESS;
13805 else
13806#endif
13807 if (rcPassUp == VINF_SUCCESS)
13808 pVCpu->iem.s.cRetInfStatuses++;
13809 else if ( rcPassUp < VINF_EM_FIRST
13810 || rcPassUp > VINF_EM_LAST
13811 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13812 {
13813 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13814 pVCpu->iem.s.cRetPassUpStatus++;
13815 rcStrict = rcPassUp;
13816 }
13817 else
13818 {
13819 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13820 pVCpu->iem.s.cRetInfStatuses++;
13821 }
13822 }
13823 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13824 pVCpu->iem.s.cRetAspectNotImplemented++;
13825 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13826 pVCpu->iem.s.cRetInstrNotImplemented++;
13827 else
13828 pVCpu->iem.s.cRetErrStatuses++;
13829 }
13830 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13831 {
13832 pVCpu->iem.s.cRetPassUpStatus++;
13833 rcStrict = pVCpu->iem.s.rcPassUp;
13834 }
13835
13836 return rcStrict;
13837}
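/*
 * The rule implemented above, in short: a pending rcPassUp only replaces an
 * informational rcStrict when it lies outside the VINF_EM_FIRST..VINF_EM_LAST
 * range or has a smaller value than rcStrict (the rcPassUp < rcStrict check
 * relies on the EM statuses being defined in priority order, smaller meaning
 * more urgent); otherwise the informational rcStrict is kept and the pass-up
 * is merely counted.
 */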
13838
13839
13840/**
13841 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13842 * IEMExecOneWithPrefetchedByPC.
13843 *
13844 * Similar code is found in IEMExecLots.
13845 *
13846 * @return Strict VBox status code.
13847 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13849 * @param fExecuteInhibit If set, execute the instruction following CLI,
13850 * POP SS and MOV SS,GR.
13851 */
13852DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit)
13853{
13854 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13855 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13856 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13857
13858#ifdef IEM_WITH_SETJMP
13859 VBOXSTRICTRC rcStrict;
13860 jmp_buf JmpBuf;
13861 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13862 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13863 if ((rcStrict = setjmp(JmpBuf)) == 0)
13864 {
13865 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13866 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13867 }
13868 else
13869 pVCpu->iem.s.cLongJumps++;
13870 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13871#else
13872 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13873 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13874#endif
13875 if (rcStrict == VINF_SUCCESS)
13876 pVCpu->iem.s.cInstructions++;
13877 if (pVCpu->iem.s.cActiveMappings > 0)
13878 {
13879 Assert(rcStrict != VINF_SUCCESS);
13880 iemMemRollback(pVCpu);
13881 }
13882 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13883 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13884 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13885
13886//#ifdef DEBUG
13887// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13888//#endif
13889
13890 /* Execute the next instruction as well if a cli, pop ss or
13891 mov ss, Gr has just completed successfully. */
13892 if ( fExecuteInhibit
13893 && rcStrict == VINF_SUCCESS
13894 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13895 && EMGetInhibitInterruptsPC(pVCpu) == IEM_GET_CTX(pVCpu)->rip )
13896 {
13897 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13898 if (rcStrict == VINF_SUCCESS)
13899 {
13900#ifdef LOG_ENABLED
13901 iemLogCurInstr(pVCpu, IEM_GET_CTX(pVCpu), false);
13902#endif
13903#ifdef IEM_WITH_SETJMP
13904 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13905 if ((rcStrict = setjmp(JmpBuf)) == 0)
13906 {
13907 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13908 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13909 }
13910 else
13911 pVCpu->iem.s.cLongJumps++;
13912 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13913#else
13914 IEM_OPCODE_GET_NEXT_U8(&b);
13915 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13916#endif
13917 if (rcStrict == VINF_SUCCESS)
13918 pVCpu->iem.s.cInstructions++;
13919 if (pVCpu->iem.s.cActiveMappings > 0)
13920 {
13921 Assert(rcStrict != VINF_SUCCESS);
13922 iemMemRollback(pVCpu);
13923 }
13924 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13925 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13926 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13927 }
13928 else if (pVCpu->iem.s.cActiveMappings > 0)
13929 iemMemRollback(pVCpu);
13930 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13931 }
13932
13933 /*
13934 * Return value fiddling, statistics and sanity assertions.
13935 */
13936 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13937
13938 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
13939 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
13940 return rcStrict;
13941}
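/*
 * Example of what the inhibit handling above is for: a guest stack switch such
 * as "mov ss, ax" followed by "mov esp, ebx".  The MOV SS puts the CPU in an
 * interrupt shadow, so iemExecOneInner executes the second instruction within
 * the same call instead of returning in between, where the caller could inject
 * an interrupt onto a half-switched ss:esp pair.
 */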
13942
13943
13944#ifdef IN_RC
13945/**
13946 * Re-enters raw-mode or ensures we return to ring-3.
13947 *
13948 * @returns rcStrict, maybe modified.
13949 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13950 * @param pCtx The current CPU context.
13951 * @param rcStrict The status code returned by the interpreter.
13952 */
13953DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, PCPUMCTX pCtx, VBOXSTRICTRC rcStrict)
13954{
13955 if ( !pVCpu->iem.s.fInPatchCode
13956 && ( rcStrict == VINF_SUCCESS
13957 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13958 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13959 {
13960 if (pCtx->eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13961 CPUMRawEnter(pVCpu);
13962 else
13963 {
13964 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13965 rcStrict = VINF_EM_RESCHEDULE;
13966 }
13967 }
13968 return rcStrict;
13969}
13970#endif
13971
13972
13973/**
13974 * Execute one instruction.
13975 *
13976 * @return Strict VBox status code.
13977 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13978 */
13979VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13980{
13981#ifdef LOG_ENABLED
13982 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
13983 iemLogCurInstr(pVCpu, pCtx, true);
13984#endif
13985
13986 /*
13987 * Do the decoding and emulation.
13988 */
13989 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13990 if (rcStrict == VINF_SUCCESS)
13991 rcStrict = iemExecOneInner(pVCpu, true);
13992 else if (pVCpu->iem.s.cActiveMappings > 0)
13993 iemMemRollback(pVCpu);
13994
13995#ifdef IN_RC
13996 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
13997#endif
13998 if (rcStrict != VINF_SUCCESS)
13999 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14000 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14001 return rcStrict;
14002}
14003
14004
14005VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14006{
14007 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14008 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14009
14010 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14011 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14012 if (rcStrict == VINF_SUCCESS)
14013 {
14014 rcStrict = iemExecOneInner(pVCpu, true);
14015 if (pcbWritten)
14016 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14017 }
14018 else if (pVCpu->iem.s.cActiveMappings > 0)
14019 iemMemRollback(pVCpu);
14020
14021#ifdef IN_RC
14022 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14023#endif
14024 return rcStrict;
14025}
14026
14027
14028VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14029 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14030{
14031 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14032 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14033
14034 VBOXSTRICTRC rcStrict;
14035 if ( cbOpcodeBytes
14036 && pCtx->rip == OpcodeBytesPC)
14037 {
14038 iemInitDecoder(pVCpu, false);
14039#ifdef IEM_WITH_CODE_TLB
14040 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14041 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14042 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14043 pVCpu->iem.s.offCurInstrStart = 0;
14044 pVCpu->iem.s.offInstrNextByte = 0;
14045#else
14046 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14047 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14048#endif
14049 rcStrict = VINF_SUCCESS;
14050 }
14051 else
14052 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14053 if (rcStrict == VINF_SUCCESS)
14054 rcStrict = iemExecOneInner(pVCpu, true);
14055 else if (pVCpu->iem.s.cActiveMappings > 0)
14056 iemMemRollback(pVCpu);
14057
14058#ifdef IN_RC
14059 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14060#endif
14061 return rcStrict;
14062}
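/*
 * A minimal usage sketch (hypothetical caller, kept out of the build): calling
 * IEMExecOneWithPrefetchedByPC when the opcode bytes at the current RIP are
 * already at hand, so the prefetch is skipped.  The opcode buffer below is made
 * up for illustration; pCtxCore must be the CPUMCTX2CORE() of the current
 * context, as the API asserts.
 */
#if 0
static VBOXSTRICTRC iemExampleExecWithPrefetched(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
{
    static const uint8_t s_abOpcodes[] = { 0x90 }; /* nop - placeholder opcode bytes. */
    return IEMExecOneWithPrefetchedByPC(pVCpu, pCtxCore, IEM_GET_CTX(pVCpu)->rip,
                                        s_abOpcodes, sizeof(s_abOpcodes));
}
#endif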
14063
14064
14065VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14066{
14067 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14068 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14069
14070 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14071 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14072 if (rcStrict == VINF_SUCCESS)
14073 {
14074 rcStrict = iemExecOneInner(pVCpu, false);
14075 if (pcbWritten)
14076 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14077 }
14078 else if (pVCpu->iem.s.cActiveMappings > 0)
14079 iemMemRollback(pVCpu);
14080
14081#ifdef IN_RC
14082 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14083#endif
14084 return rcStrict;
14085}
14086
14087
14088VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14089 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14090{
14091 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14092 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14093
14094 VBOXSTRICTRC rcStrict;
14095 if ( cbOpcodeBytes
14096 && pCtx->rip == OpcodeBytesPC)
14097 {
14098 iemInitDecoder(pVCpu, true);
14099#ifdef IEM_WITH_CODE_TLB
14100 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14101 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14102 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14103 pVCpu->iem.s.offCurInstrStart = 0;
14104 pVCpu->iem.s.offInstrNextByte = 0;
14105#else
14106 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14107 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14108#endif
14109 rcStrict = VINF_SUCCESS;
14110 }
14111 else
14112 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14113 if (rcStrict == VINF_SUCCESS)
14114 rcStrict = iemExecOneInner(pVCpu, false);
14115 else if (pVCpu->iem.s.cActiveMappings > 0)
14116 iemMemRollback(pVCpu);
14117
14118#ifdef IN_RC
14119 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14120#endif
14121 return rcStrict;
14122}
14123
14124
14125/**
14126 * May come in handy for debugging DISGetParamSize.
14127 *
14128 * @returns Strict VBox status code.
14129 * @param pVCpu The cross context virtual CPU structure of the
14130 * calling EMT.
14131 * @param pCtxCore The context core structure.
14132 * @param OpcodeBytesPC The PC of the opcode bytes.
14133 * @param pvOpcodeBytes Prefetched opcode bytes.
14134 * @param cbOpcodeBytes Number of prefetched bytes.
14135 * @param pcbWritten Where to return the number of bytes written.
14136 * Optional.
14137 */
14138VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14139 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14140 uint32_t *pcbWritten)
14141{
14142 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14143 AssertReturn(CPUMCTX2CORE(pCtx) == pCtxCore, VERR_IEM_IPE_3);
14144
14145 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14146 VBOXSTRICTRC rcStrict;
14147 if ( cbOpcodeBytes
14148 && pCtx->rip == OpcodeBytesPC)
14149 {
14150 iemInitDecoder(pVCpu, true);
14151#ifdef IEM_WITH_CODE_TLB
14152 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14153 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14154 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14155 pVCpu->iem.s.offCurInstrStart = 0;
14156 pVCpu->iem.s.offInstrNextByte = 0;
14157#else
14158 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14159 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14160#endif
14161 rcStrict = VINF_SUCCESS;
14162 }
14163 else
14164 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14165 if (rcStrict == VINF_SUCCESS)
14166 {
14167 rcStrict = iemExecOneInner(pVCpu, false);
14168 if (pcbWritten)
14169 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14170 }
14171 else if (pVCpu->iem.s.cActiveMappings > 0)
14172 iemMemRollback(pVCpu);
14173
14174#ifdef IN_RC
14175 rcStrict = iemRCRawMaybeReenter(pVCpu, pCtx, rcStrict);
14176#endif
14177 return rcStrict;
14178}
14179
14180
14181VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14182{
14183 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14184
14185 /*
14186 * See if there is an interrupt pending in TRPM, inject it if we can.
14187 */
14188 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14189
14190 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14191#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14192 bool fIntrEnabled = pCtx->hwvirt.fGif;
14193 if (fIntrEnabled)
14194 {
14195 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
14196 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
14197 else
14198 fIntrEnabled = pCtx->eflags.Bits.u1IF;
14199 }
14200#else
14201 bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
14202#endif
14203 if ( fIntrEnabled
14204 && TRPMHasTrap(pVCpu)
14205 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
14206 {
14207 uint8_t u8TrapNo;
14208 TRPMEVENT enmType;
14209 RTGCUINT uErrCode;
14210 RTGCPTR uCr2;
14211 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14212 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14213 TRPMResetTrap(pVCpu);
14214 }
14215
14216 /*
14217 * Initial decoder init w/ prefetch, then setup setjmp.
14218 */
14219 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14220 if (rcStrict == VINF_SUCCESS)
14221 {
14222#ifdef IEM_WITH_SETJMP
14223 jmp_buf JmpBuf;
14224 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14225 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14226 pVCpu->iem.s.cActiveMappings = 0;
14227 if ((rcStrict = setjmp(JmpBuf)) == 0)
14228#endif
14229 {
14230 /*
14231 * The run loop. We limit ourselves to 4096 instructions right now.
14232 */
14233 PVM pVM = pVCpu->CTX_SUFF(pVM);
14234 uint32_t cInstr = 4096;
14235 for (;;)
14236 {
14237 /*
14238 * Log the state.
14239 */
14240#ifdef LOG_ENABLED
14241 iemLogCurInstr(pVCpu, pCtx, true);
14242#endif
14243
14244 /*
14245 * Do the decoding and emulation.
14246 */
14247 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14248 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14249 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14250 {
14251 Assert(pVCpu->iem.s.cActiveMappings == 0);
14252 pVCpu->iem.s.cInstructions++;
14253 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14254 {
14255 uint32_t fCpu = pVCpu->fLocalForcedActions
14256 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14257 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14258 | VMCPU_FF_TLB_FLUSH
14259#ifdef VBOX_WITH_RAW_MODE
14260 | VMCPU_FF_TRPM_SYNC_IDT
14261 | VMCPU_FF_SELM_SYNC_TSS
14262 | VMCPU_FF_SELM_SYNC_GDT
14263 | VMCPU_FF_SELM_SYNC_LDT
14264#endif
14265 | VMCPU_FF_INHIBIT_INTERRUPTS
14266 | VMCPU_FF_BLOCK_NMIS
14267 | VMCPU_FF_UNHALT ));
14268
14269 if (RT_LIKELY( ( !fCpu
14270 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14271 && !pCtx->rflags.Bits.u1IF) )
14272 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14273 {
14274 if (cInstr-- > 0)
14275 {
14276 Assert(pVCpu->iem.s.cActiveMappings == 0);
14277 iemReInitDecoder(pVCpu);
14278 continue;
14279 }
14280 }
14281 }
14282 Assert(pVCpu->iem.s.cActiveMappings == 0);
14283 }
14284 else if (pVCpu->iem.s.cActiveMappings > 0)
14285 iemMemRollback(pVCpu);
14286 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14287 break;
14288 }
14289 }
14290#ifdef IEM_WITH_SETJMP
14291 else
14292 {
14293 if (pVCpu->iem.s.cActiveMappings > 0)
14294 iemMemRollback(pVCpu);
14295 pVCpu->iem.s.cLongJumps++;
14296 }
14297 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14298#endif
14299
14300 /*
14301 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14302 */
14303 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs));
14304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss));
14305 }
14306 else
14307 {
14308 if (pVCpu->iem.s.cActiveMappings > 0)
14309 iemMemRollback(pVCpu);
14310
14311#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14312 /*
14313 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14314 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14315 */
14316 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14317#endif
14318 }
14319
14320 /*
14321 * Maybe re-enter raw-mode and log.
14322 */
14323#ifdef IN_RC
14324 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict);
14325#endif
14326 if (rcStrict != VINF_SUCCESS)
14327 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14328 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14329 if (pcInstructions)
14330 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14331 return rcStrict;
14332}
14333
14334
14335
14336/**
14337 * Injects a trap, fault, abort, software interrupt or external interrupt.
14338 *
14339 * The parameter list matches TRPMQueryTrapAll pretty closely.
14340 *
14341 * @returns Strict VBox status code.
14342 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14343 * @param u8TrapNo The trap number.
14344 * @param enmType What type is it (trap/fault/abort), software
14345 * interrupt or hardware interrupt.
14346 * @param uErrCode The error code if applicable.
14347 * @param uCr2 The CR2 value if applicable.
14348 * @param cbInstr The instruction length (only relevant for
14349 * software interrupts).
14350 */
14351VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14352 uint8_t cbInstr)
14353{
14354 iemInitDecoder(pVCpu, false);
14355#ifdef DBGFTRACE_ENABLED
14356 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14357 u8TrapNo, enmType, uErrCode, uCr2);
14358#endif
14359
14360 uint32_t fFlags;
14361 switch (enmType)
14362 {
14363 case TRPM_HARDWARE_INT:
14364 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14365 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14366 uErrCode = uCr2 = 0;
14367 break;
14368
14369 case TRPM_SOFTWARE_INT:
14370 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14371 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14372 uErrCode = uCr2 = 0;
14373 break;
14374
14375 case TRPM_TRAP:
14376 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14377 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14378 if (u8TrapNo == X86_XCPT_PF)
14379 fFlags |= IEM_XCPT_FLAGS_CR2;
14380 switch (u8TrapNo)
14381 {
14382 case X86_XCPT_DF:
14383 case X86_XCPT_TS:
14384 case X86_XCPT_NP:
14385 case X86_XCPT_SS:
14386 case X86_XCPT_PF:
14387 case X86_XCPT_AC:
14388 fFlags |= IEM_XCPT_FLAGS_ERR;
14389 break;
14390
14391 case X86_XCPT_NMI:
14392 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14393 break;
14394 }
14395 break;
14396
14397 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14398 }
14399
14400 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14401
14402 if (pVCpu->iem.s.cActiveMappings > 0)
14403 iemMemRollback(pVCpu);
14404
14405 return rcStrict;
14406}
14407
14408
14409/**
14410 * Injects the active TRPM event.
14411 *
14412 * @returns Strict VBox status code.
14413 * @param pVCpu The cross context virtual CPU structure.
14414 */
14415VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14416{
14417#ifndef IEM_IMPLEMENTS_TASKSWITCH
14418 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14419#else
14420 uint8_t u8TrapNo;
14421 TRPMEVENT enmType;
14422 RTGCUINT uErrCode;
14423 RTGCUINTPTR uCr2;
14424 uint8_t cbInstr;
14425 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14426 if (RT_FAILURE(rc))
14427 return rc;
14428
14429 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14430# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14431 if (rcStrict == VINF_SVM_VMEXIT)
14432 rcStrict = VINF_SUCCESS;
14433# endif
14434
14435 /** @todo Are there any other codes that imply the event was successfully
14436 * delivered to the guest? See @bugref{6607}. */
14437 if ( rcStrict == VINF_SUCCESS
14438 || rcStrict == VINF_IEM_RAISED_XCPT)
14439 TRPMResetTrap(pVCpu);
14440
14441 return rcStrict;
14442#endif
14443}
14444
14445
14446VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14447{
14448 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14449 return VERR_NOT_IMPLEMENTED;
14450}
14451
14452
14453VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14454{
14455 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14456 return VERR_NOT_IMPLEMENTED;
14457}
14458
14459
14460#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14461/**
14462 * Executes an IRET instruction with default operand size.
14463 *
14464 * This is for PATM.
14465 *
14466 * @returns VBox status code.
14467 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14468 * @param pCtxCore The register frame.
14469 */
14470VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14471{
14472 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14473
14474 iemCtxCoreToCtx(pCtx, pCtxCore);
14475 iemInitDecoder(pVCpu);
14476 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14477 if (rcStrict == VINF_SUCCESS)
14478 iemCtxToCtxCore(pCtxCore, pCtx);
14479 else
14480 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14481 pCtx->cs, pCtx->rip, pCtx->ss, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14482 return rcStrict;
14483}
14484#endif
14485
14486
14487/**
14488 * Macro used by the IEMExec* methods to check the given instruction length.
14489 *
14490 * Will return on failure!
14491 *
14492 * @param a_cbInstr The given instruction length.
14493 * @param a_cbMin The minimum length.
14494 */
14495#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14496 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14497 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
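/*
 * The single unsigned comparison above checks a_cbMin <= a_cbInstr <= 15 in one
 * go.  E.g. with a_cbMin = 2 the right-hand side is 13: a_cbInstr = 1 wraps to
 * a huge unsigned value and fails, 2..15 map to 0..13 and pass, and 16 yields
 * 14 > 13 and fails.
 */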
14498
14499
14500/**
14501 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14502 *
14503 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14504 *
14505 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
14506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14507 * @param rcStrict The status code to fiddle.
14508 */
14509DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14510{
14511 iemUninitExec(pVCpu);
14512#ifdef IN_RC
14513 return iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu),
14514 iemExecStatusCodeFiddling(pVCpu, rcStrict));
14515#else
14516 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14517#endif
14518}
14519
14520
14521/**
14522 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14523 *
14524 * This API ASSUMES that the caller has already verified that the guest code is
14525 * allowed to access the I/O port. (The I/O port is in the DX register in the
14526 * guest state.)
14527 *
14528 * @returns Strict VBox status code.
14529 * @param pVCpu The cross context virtual CPU structure.
14530 * @param cbValue The size of the I/O port access (1, 2, or 4).
14531 * @param enmAddrMode The addressing mode.
14532 * @param fRepPrefix Indicates whether a repeat prefix is used
14533 * (doesn't matter which for this instruction).
14534 * @param cbInstr The instruction length in bytes.
14535 * @param iEffSeg The effective segment register number.
14536 * @param fIoChecked Whether the access to the I/O port has been
14537 * checked or not. It's typically checked in the
14538 * HM scenario.
14539 */
14540VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14541 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14542{
14543 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14544 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14545
14546 /*
14547 * State init.
14548 */
14549 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14550
14551 /*
14552 * Switch orgy for getting to the right handler.
14553 */
14554 VBOXSTRICTRC rcStrict;
14555 if (fRepPrefix)
14556 {
14557 switch (enmAddrMode)
14558 {
14559 case IEMMODE_16BIT:
14560 switch (cbValue)
14561 {
14562 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14563 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14564 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14565 default:
14566 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14567 }
14568 break;
14569
14570 case IEMMODE_32BIT:
14571 switch (cbValue)
14572 {
14573 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14574 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14575 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14576 default:
14577 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14578 }
14579 break;
14580
14581 case IEMMODE_64BIT:
14582 switch (cbValue)
14583 {
14584 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14585 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14586 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14587 default:
14588 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14589 }
14590 break;
14591
14592 default:
14593 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14594 }
14595 }
14596 else
14597 {
14598 switch (enmAddrMode)
14599 {
14600 case IEMMODE_16BIT:
14601 switch (cbValue)
14602 {
14603 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14604 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14605 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14606 default:
14607 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14608 }
14609 break;
14610
14611 case IEMMODE_32BIT:
14612 switch (cbValue)
14613 {
14614 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14615 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14616 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14617 default:
14618 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14619 }
14620 break;
14621
14622 case IEMMODE_64BIT:
14623 switch (cbValue)
14624 {
14625 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14626 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14627 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14628 default:
14629 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14630 }
14631 break;
14632
14633 default:
14634 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14635 }
14636 }
14637
14638 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14639}
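/*
 * A minimal usage sketch (hypothetical exit handler, kept out of the build):
 * forwarding a REP OUTSB in 64-bit mode (source DS:rSI, port in DX per the
 * guest state) to the API above.  The instruction length comes from the caller
 * and the I/O port check is assumed to have been done already.
 */
#if 0
static VBOXSTRICTRC iemExampleRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_64BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif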
14640
14641
14642/**
14643 * Interface for HM and EM for executing string I/O IN (read) instructions.
14644 *
14645 * This API ASSUMES that the caller has already verified that the guest code is
14646 * allowed to access the I/O port. (The I/O port is in the DX register in the
14647 * guest state.)
14648 *
14649 * @returns Strict VBox status code.
14650 * @param pVCpu The cross context virtual CPU structure.
14651 * @param cbValue The size of the I/O port access (1, 2, or 4).
14652 * @param enmAddrMode The addressing mode.
14653 * @param fRepPrefix Indicates whether a repeat prefix is used
14654 * (doesn't matter which for this instruction).
14655 * @param cbInstr The instruction length in bytes.
14656 * @param fIoChecked Whether the access to the I/O port has been
14657 * checked or not. It's typically checked in the
14658 * HM scenario.
14659 */
14660VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14661 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14662{
14663 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14664
14665 /*
14666 * State init.
14667 */
14668 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14669
14670 /*
14671 * Switch orgy for getting to the right handler.
14672 */
14673 VBOXSTRICTRC rcStrict;
14674 if (fRepPrefix)
14675 {
14676 switch (enmAddrMode)
14677 {
14678 case IEMMODE_16BIT:
14679 switch (cbValue)
14680 {
14681 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14682 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14683 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14684 default:
14685 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14686 }
14687 break;
14688
14689 case IEMMODE_32BIT:
14690 switch (cbValue)
14691 {
14692 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14693 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14694 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14695 default:
14696 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14697 }
14698 break;
14699
14700 case IEMMODE_64BIT:
14701 switch (cbValue)
14702 {
14703 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14704 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14705 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14706 default:
14707 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14708 }
14709 break;
14710
14711 default:
14712 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14713 }
14714 }
14715 else
14716 {
14717 switch (enmAddrMode)
14718 {
14719 case IEMMODE_16BIT:
14720 switch (cbValue)
14721 {
14722 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14723 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14724 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14725 default:
14726 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14727 }
14728 break;
14729
14730 case IEMMODE_32BIT:
14731 switch (cbValue)
14732 {
14733 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14734 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14735 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14736 default:
14737 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14738 }
14739 break;
14740
14741 case IEMMODE_64BIT:
14742 switch (cbValue)
14743 {
14744 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14745 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14746 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14747 default:
14748 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14749 }
14750 break;
14751
14752 default:
14753 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14754 }
14755 }
14756
14757 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14758}
14759
14760
14761/**
14762 * Interface for rawmode to execute an OUT instruction.
14763 *
14764 * @returns Strict VBox status code.
14765 * @param pVCpu The cross context virtual CPU structure.
14766 * @param cbInstr The instruction length in bytes.
14767 * @param u16Port The port to write to.
14768 * @param cbReg The register size.
14769 *
14770 * @remarks In ring-0 not all of the state needs to be synced in.
14771 */
14772VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14773{
14774 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14775 Assert(cbReg <= 4 && cbReg != 3);
14776
14777 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14778 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14779 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14780}
14781
14782
14783/**
14784 * Interface for rawmode to execute an IN instruction.
14785 *
14786 * @returns Strict VBox status code.
14787 * @param pVCpu The cross context virtual CPU structure.
14788 * @param cbInstr The instruction length in bytes.
14789 * @param u16Port The port to read.
14790 * @param cbReg The register size.
14791 */
14792VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14793{
14794 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14795 Assert(cbReg <= 4 && cbReg != 3);
14796
14797 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14798 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14799 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14800}
14801
14802
14803/**
14804 * Interface for HM and EM to write to a CRx register.
14805 *
14806 * @returns Strict VBox status code.
14807 * @param pVCpu The cross context virtual CPU structure.
14808 * @param cbInstr The instruction length in bytes.
14809 * @param iCrReg The control register number (destination).
14810 * @param iGReg The general purpose register number (source).
14811 *
14812 * @remarks In ring-0 not all of the state needs to be synced in.
14813 */
14814VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14815{
14816 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14817 Assert(iCrReg < 16);
14818 Assert(iGReg < 16);
14819
14820 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14821 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14822 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14823}
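/*
 * A minimal usage sketch (hypothetical exit handler, kept out of the build):
 * forwarding an already decoded "mov cr3, rax" (the 3-byte 0F 22 D8 encoding)
 * to the API above.
 */
#if 0
static VBOXSTRICTRC iemExampleMovToCr3(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg: CR3*/, 0 /*iGReg: rax*/);
}
#endif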
14824
14825
14826/**
14827 * Interface for HM and EM to read from a CRx register.
14828 *
14829 * @returns Strict VBox status code.
14830 * @param pVCpu The cross context virtual CPU structure.
14831 * @param cbInstr The instruction length in bytes.
14832 * @param iGReg The general purpose register number (destination).
14833 * @param iCrReg The control register number (source).
14834 *
14835 * @remarks In ring-0 not all of the state needs to be synced in.
14836 */
14837VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
14838{
14839 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14840 Assert(iCrReg < 16);
14841 Assert(iGReg < 16);
14842
14843 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14844 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
14845 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14846}
14847
14848
14849/**
14850 * Interface for HM and EM to clear the CR0[TS] bit.
14851 *
14852 * @returns Strict VBox status code.
14853 * @param pVCpu The cross context virtual CPU structure.
14854 * @param cbInstr The instruction length in bytes.
14855 *
14856 * @remarks In ring-0 not all of the state needs to be synced in.
14857 */
14858VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
14859{
14860 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14861
14862 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14863 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
14864 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14865}
14866
14867
14868/**
14869 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
14870 *
14871 * @returns Strict VBox status code.
14872 * @param pVCpu The cross context virtual CPU structure.
14873 * @param cbInstr The instruction length in bytes.
14874 * @param uValue The value to load into CR0.
14875 *
14876 * @remarks In ring-0 not all of the state needs to be synced in.
14877 */
14878VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
14879{
14880 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14881
14882 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14883 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
14884 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14885}
14886
14887
14888/**
14889 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
14890 *
14891 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
14892 *
14893 * @returns Strict VBox status code.
14894 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14895 * @param cbInstr The instruction length in bytes.
14896 * @remarks In ring-0 not all of the state needs to be synced in.
14897 * @thread EMT(pVCpu)
14898 */
14899VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
14900{
14901 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14902
14903 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14904 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
14905 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14906}
14907
14908
14909/**
14910 * Interface for HM and EM to emulate the INVLPG instruction.
14911 *
14912 * @param pVCpu The cross context virtual CPU structure.
14913 * @param cbInstr The instruction length in bytes.
14914 * @param GCPtrPage The effective address of the page to invalidate.
14915 *
14916 * @remarks In ring-0 not all of the state needs to be synced in.
14917 */
14918VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
14919{
14920 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
14921
14922 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14923 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
14924 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14925}


/**
 * Interface for HM and EM to emulate the INVPCID instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu               The cross context virtual CPU structure.
 * @param   cbInstr             The instruction length in bytes.
 * @param   uType               The invalidation type.
 * @param   GCPtrInvpcidDesc    The effective address of the INVPCID descriptor.
 *
 * @remarks In ring-0 not all of the state needs to be synced in.
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPU pVCpu, uint8_t cbInstr, uint8_t uType, RTGCPTR GCPtrInvpcidDesc)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_invpcid, uType, GCPtrInvpcidDesc);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Checks if IEM is in the process of delivering an event (interrupt or
 * exception).
 *
 * @returns true if we're in the process of raising an interrupt or exception,
 *          false otherwise.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   puVector    Where to store the vector associated with the
 *                      currently delivered event, optional.
 * @param   pfFlags     Where to store the event delivery flags (see
 *                      IEM_XCPT_FLAGS_XXX), optional.
 * @param   puErr       Where to store the error code associated with the
 *                      event, optional.
 * @param   puCr2       Where to store the CR2 associated with the event,
 *                      optional.
 * @remarks The caller should check the flags to determine if the error code and
 *          CR2 are valid for the event.
 */
VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
{
    bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
    if (fRaisingXcpt)
    {
        if (puVector)
            *puVector = pVCpu->iem.s.uCurXcpt;
        if (pfFlags)
            *pfFlags = pVCpu->iem.s.fCurXcpt;
        if (puErr)
            *puErr = pVCpu->iem.s.uCurXcptErr;
        if (puCr2)
            *puCr2 = pVCpu->iem.s.uCurXcptCr2;
    }
    return fRaisingXcpt;
}
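

/*
 * Usage sketch (illustrative only, not compiled): a caller can use
 * IEMGetCurrentXcpt to find out whether an event is being delivered and,
 * if so, which one. The IEM_XCPT_FLAGS_ERR / IEM_XCPT_FLAGS_CR2 checks
 * below are based on the IEM_XCPT_FLAGS_XXX defines referenced above and
 * should be double-checked against IEMInternal.h; the function name is
 * hypothetical.
 */
#if 0
static void hmExampleLogPendingXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        Log(("Delivering vector %#x fFlags=%#x\n", uVector, fFlags));
        /* Only trust the error code and CR2 when the flags say they are valid. */
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log(("  error code: %#x\n", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            Log(("  CR2: %#RX64\n", uCr2));
    }
}
#endif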

#ifdef VBOX_WITH_NESTED_HWVIRT_SVM

/**
 * Interface for HM and EM to emulate the CLGI instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr The instruction length in bytes.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the STGI instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr The instruction length in bytes.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMLOAD instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr The instruction length in bytes.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMSAVE instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr The instruction length in bytes.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the INVLPGA instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr The instruction length in bytes.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMRUN instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr The instruction length in bytes.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate \#VMEXIT.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   uExitCode   The exit code.
 * @param   uExitInfo1  The exit info. 1 field.
 * @param   uExitInfo2  The exit info. 2 field.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
{
    IEM_CTX_ASSERT(IEM_GET_CTX(pVCpu), IEM_CPUMCTX_EXTRN_MUST_MASK);
    VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, IEM_GET_CTX(pVCpu), uExitCode, uExitInfo1, uExitInfo2);
    return iemExecStatusCodeFiddling(pVCpu, rcStrict);
}
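

/*
 * Usage sketch (illustrative only, not compiled): when an intercept taken by
 * the nested guest's hypervisor has to be reflected as a #VMEXIT, the caller
 * can ask IEM to perform the world switch via IEMExecSvmVmexit above. The
 * wrapper name below is hypothetical; uExitCode and the two exit info fields
 * follow the SVM #VMEXIT conventions.
 */
#if 0
static VBOXSTRICTRC nestedSvmExampleReflectVmexit(PVMCPU pVCpu, uint64_t uExitCode,
                                                  uint64_t uExitInfo1, uint64_t uExitInfo2)
{
    /* IEM updates the nested-guest VMCB and switches back to the outer guest;
       the status code has already been run through iemExecStatusCodeFiddling. */
    return IEMExecSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
}
#endif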

#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPU pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}
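

/*
 * Worked example (illustrative only, not compiled): the merge rules above
 * boil down to "a plain success defers to the other status code, and two EM
 * scheduling codes merge to the numerically smaller one (treated as the
 * higher priority)". The assertions below spell this out using VINF_EM_HALT
 * and VINF_EM_RESET as stand-in scheduling codes; they are a sketch only.
 */
#if 0
static void iemR3ExampleMergeRules(PVMCPU pVCpu)
{
    /* A pending commit status overrides plain success... */
    Assert(iemR3MergeStatus(VINF_SUCCESS, VINF_EM_HALT, 0 /*iMemMap*/, pVCpu) == VINF_EM_HALT);
    /* ...and a successful commit keeps whatever EM already wanted to do. */
    Assert(iemR3MergeStatus(VINF_EM_HALT, VINF_SUCCESS, 0 /*iMemMap*/, pVCpu) == VINF_EM_HALT);
    /* Two EM scheduling codes: the smaller value wins. */
    Assert(   iemR3MergeStatus(VINF_EM_HALT, VINF_EM_RESET, 0 /*iMemMap*/, pVCpu)
           == RT_MIN(VINF_EM_HALT, VINF_EM_RESET));
}
#endif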


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}
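

/*
 * Usage sketch (illustrative only, not compiled): the force-flag processing
 * loop in EM is the intended caller of IEMR3ProcessForceFlag above. When
 * VMCPU_FF_IEM is pending it hands in the current status code and continues
 * with whatever comes back. The surrounding function is hypothetical and the
 * VMCPU_FF_IS_SET check is assumed to match the VMM force-flag API.
 */
#if 0
static VBOXSTRICTRC emExampleProcessIemForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    /* Only call IEMR3ProcessForceFlag when the flag is actually set; it
       asserts that there really are pending bounce buffer writes. */
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif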

#endif /* IN_RING3 */
