VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@74155

Last change on this file since 74155 was 74155, checked in by vboxsync, 6 years ago

VMM: Nested VMX: bugref:9180 VMXVDIAG naming.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 627.2 KB
1/* $Id: IEMAll.cpp 74155 2018-09-09 12:37:26Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
135 * We're using macros for this so that adding and removing parameters as well as
136 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
148 * We're using macros for this so that adding and removing parameters as well as
149 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
196
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
210
211/**
212 * CPU exception classes.
213 */
214typedef enum IEMXCPTCLASS
215{
216 IEMXCPTCLASS_BENIGN,
217 IEMXCPTCLASS_CONTRIBUTORY,
218 IEMXCPTCLASS_PAGE_FAULT,
219 IEMXCPTCLASS_DOUBLE_FAULT
220} IEMXCPTCLASS;
221
222
223/*********************************************************************************************************************************
224* Defined Constants And Macros *
225*********************************************************************************************************************************/
226/** @def IEM_WITH_SETJMP
227 * Enables alternative status code handling using setjmps.
228 *
229 * This adds a bit of expense via the setjmp() call since it saves all the
230 * non-volatile registers. However, it eliminates return code checks and allows
231 * for more optimal return value passing (return regs instead of stack buffer).
232 */
233#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
234# define IEM_WITH_SETJMP
235#endif
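
/**
 * Illustrative sketch only of the two status-handling styles this option
 * selects between; the helper names are the ones used further down in this
 * file and the snippet is not part of the build. Without IEM_WITH_SETJMP each
 * memory helper returns a VBOXSTRICTRC the caller must check; with it, the
 * *Jmp helper variants longjmp on failure and return the value directly, so
 * the happy path carries no status checks.
 * @code
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Val, iSegReg, GCPtrMem); // return-code style
 *      uint32_t     u32Val2  = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);       // setjmp/longjmp style
 * @endcode
 */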
236
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
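
/**
 * Illustrative usage sketch only (the switch operands are hypothetical): the
 * default-case macros above are meant to close a fully enumerated switch in a
 * function that returns a VBox status code, e.g.:
 * @code
 *      switch (enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: cbValue = 2; break;
 *          case IEMMODE_32BIT: cbValue = 4; break;
 *          case IEMMODE_64BIT: cbValue = 8; break;
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 * @endcode
 */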
243
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
276
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_2.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
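
/**
 * Illustrative sketch only: FNIEMOP_DEF and FNIEMOP_CALL are intended to be
 * used as a pair; a hypothetical decoder body and its dispatch via the
 * one-byte opcode map might look like this:
 * @code
 *      FNIEMOP_DEF(iemOp_Example)      // hypothetical decoder function
 *      {
 *          return VINF_SUCCESS;        // a real decoder decodes/executes the instruction here
 *      }
 *      // ... in the dispatcher:
 *      VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
 * @endcode
 */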
300
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in a 64-bit code segment.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Check if we're currently executing in real mode.
335 *
336 * @returns @c true if it is, @c false if not.
337 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
338 */
339#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
340
341/**
342 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
343 * @returns PCCPUMFEATURES
344 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
345 */
346#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
347
348/**
349 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
350 * @returns PCCPUMFEATURES
351 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
352 */
353#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
354
355/**
356 * Evaluates to true if we're presenting an Intel CPU to the guest.
357 */
358#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
359
360/**
361 * Evaluates to true if we're presenting an AMD CPU to the guest.
362 */
363#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
364
365/**
366 * Check if the address is canonical.
367 */
368#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
369
370/**
371 * Gets the effective VEX.VVVV value.
372 *
373 * The 4th bit is ignored if not 64-bit code.
374 * @returns effective V-register value.
375 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
376 */
377#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
378 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
379
380/** @def IEM_USE_UNALIGNED_DATA_ACCESS
381 * Use unaligned accesses instead of elaborate byte assembly. */
382#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
383# define IEM_USE_UNALIGNED_DATA_ACCESS
384#endif
385
386#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
387/**
388 * Check if VMX is enabled.
389 */
390# define IEM_IS_VMX_ENABLED(a_pVCpu) (CPUMIsGuestVmxEnabled(IEM_GET_CTX(a_pVCpu)))
391
392/**
393 * Check if the guest has entered VMX root operation.
394 */
395#define IEM_IS_VMX_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
396
397/**
398 * Check if the guest has entered VMX non-root operation.
399 */
400#define IEM_IS_VMX_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
401
402#else
403# define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix) do { } while (0)
404# define IEM_IS_VMX_ENABLED(a_pVCpu) (false)
405# define IEM_IS_VMX_ROOT_MODE(a_pVCpu) (false)
406# define IEM_IS_VMX_NON_ROOT_MODE(a_pVCpu) (false)
407
408#endif
409
410#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
411/**
412 * Check the common SVM instruction preconditions.
413 */
414# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) \
415 do { \
416 if (!IEM_IS_SVM_ENABLED(a_pVCpu)) \
417 { \
418 Log((RT_STR(a_Instr) ": EFER.SVME not enabled -> #UD\n")); \
419 return iemRaiseUndefinedOpcode(a_pVCpu); \
420 } \
421 if (IEM_IS_REAL_OR_V86_MODE(a_pVCpu)) \
422 { \
423 Log((RT_STR(a_Instr) ": Real or v8086 mode -> #UD\n")); \
424 return iemRaiseUndefinedOpcode(a_pVCpu); \
425 } \
426 if ((a_pVCpu)->iem.s.uCpl != 0) \
427 { \
428 Log((RT_STR(a_Instr) ": CPL != 0 -> #GP(0)\n")); \
429 return iemRaiseGeneralProtectionFault0(a_pVCpu); \
430 } \
431 } while (0)
432
433/**
434 * Updates the NextRIP (NRI) field in the nested-guest VMCB.
435 */
436# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
437 do { \
438 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
439 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
440 } while (0)
441
442/**
443 * Check if SVM is enabled.
444 */
445# define IEM_IS_SVM_ENABLED(a_pVCpu) (CPUMIsGuestSvmEnabled(IEM_GET_CTX(a_pVCpu)))
446
447/**
448 * Check if an SVM control/instruction intercept is set.
449 */
450# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
451
452/**
453 * Check if an SVM read CRx intercept is set.
454 */
455# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
456
457/**
458 * Check if an SVM write CRx intercept is set.
459 */
460# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
461
462/**
463 * Check if an SVM read DRx intercept is set.
464 */
465# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
466
467/**
468 * Check if an SVM write DRx intercept is set.
469 */
470# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
471
472/**
473 * Check if an SVM exception intercept is set.
474 */
475# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
476
477/**
478 * Get the SVM pause-filter count.
479 */
480# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (CPUMGetGuestSvmPauseFilterCount(a_pVCpu, IEM_GET_CTX(a_pVCpu)))
481
482/**
483 * Invokes the SVM \#VMEXIT handler for the nested-guest.
484 */
485# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
486 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
487
488/**
489 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
490 * corresponding decode assist information.
491 */
492# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
493 do \
494 { \
495 uint64_t uExitInfo1; \
496 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
497 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
498 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
499 else \
500 uExitInfo1 = 0; \
501 IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
502 } while (0)
503
504#else
505# define IEM_SVM_INSTR_COMMON_CHECKS(a_pVCpu, a_Instr) do { } while (0)
506# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
507# define IEM_IS_SVM_ENABLED(a_pVCpu) (false)
508# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
509# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
510# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
511# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
512# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
513# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
514# define IEM_GET_SVM_PAUSE_FILTER_COUNT(a_pVCpu) (0)
515# define IEM_RETURN_SVM_VMEXIT(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
516# define IEM_RETURN_SVM_CRX_VMEXIT(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
517
518#endif
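
/**
 * Illustrative sketch only (the exit code and intercept bit are merely
 * examples): typical use of the SVM intercept macros from an instruction
 * implementation.
 * @code
 *      if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_CPUID))
 *      {
 *          IEM_SVM_UPDATE_NRIP(pVCpu);
 *          IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_CPUID, 0, 0);
 *      }
 * @endcode
 */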
519
520
521/*********************************************************************************************************************************
522* Global Variables *
523*********************************************************************************************************************************/
524extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
525
526
527/** Function table for the ADD instruction. */
528IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
529{
530 iemAImpl_add_u8, iemAImpl_add_u8_locked,
531 iemAImpl_add_u16, iemAImpl_add_u16_locked,
532 iemAImpl_add_u32, iemAImpl_add_u32_locked,
533 iemAImpl_add_u64, iemAImpl_add_u64_locked
534};
535
536/** Function table for the ADC instruction. */
537IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
538{
539 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
540 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
541 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
542 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
543};
544
545/** Function table for the SUB instruction. */
546IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
547{
548 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
549 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
550 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
551 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
552};
553
554/** Function table for the SBB instruction. */
555IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
556{
557 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
558 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
559 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
560 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
561};
562
563/** Function table for the OR instruction. */
564IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
565{
566 iemAImpl_or_u8, iemAImpl_or_u8_locked,
567 iemAImpl_or_u16, iemAImpl_or_u16_locked,
568 iemAImpl_or_u32, iemAImpl_or_u32_locked,
569 iemAImpl_or_u64, iemAImpl_or_u64_locked
570};
571
572/** Function table for the XOR instruction. */
573IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
574{
575 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
576 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
577 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
578 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
579};
580
581/** Function table for the AND instruction. */
582IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
583{
584 iemAImpl_and_u8, iemAImpl_and_u8_locked,
585 iemAImpl_and_u16, iemAImpl_and_u16_locked,
586 iemAImpl_and_u32, iemAImpl_and_u32_locked,
587 iemAImpl_and_u64, iemAImpl_and_u64_locked
588};
589
590/** Function table for the CMP instruction.
591 * @remarks Making operand order ASSUMPTIONS.
592 */
593IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
594{
595 iemAImpl_cmp_u8, NULL,
596 iemAImpl_cmp_u16, NULL,
597 iemAImpl_cmp_u32, NULL,
598 iemAImpl_cmp_u64, NULL
599};
600
601/** Function table for the TEST instruction.
602 * @remarks Making operand order ASSUMPTIONS.
603 */
604IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
605{
606 iemAImpl_test_u8, NULL,
607 iemAImpl_test_u16, NULL,
608 iemAImpl_test_u32, NULL,
609 iemAImpl_test_u64, NULL
610};
611
612/** Function table for the BT instruction. */
613IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
614{
615 NULL, NULL,
616 iemAImpl_bt_u16, NULL,
617 iemAImpl_bt_u32, NULL,
618 iemAImpl_bt_u64, NULL
619};
620
621/** Function table for the BTC instruction. */
622IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
623{
624 NULL, NULL,
625 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
626 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
627 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
628};
629
630/** Function table for the BTR instruction. */
631IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
632{
633 NULL, NULL,
634 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
635 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
636 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
637};
638
639/** Function table for the BTS instruction. */
640IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
641{
642 NULL, NULL,
643 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
644 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
645 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
646};
647
648/** Function table for the BSF instruction. */
649IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
650{
651 NULL, NULL,
652 iemAImpl_bsf_u16, NULL,
653 iemAImpl_bsf_u32, NULL,
654 iemAImpl_bsf_u64, NULL
655};
656
657/** Function table for the BSR instruction. */
658IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
659{
660 NULL, NULL,
661 iemAImpl_bsr_u16, NULL,
662 iemAImpl_bsr_u32, NULL,
663 iemAImpl_bsr_u64, NULL
664};
665
666/** Function table for the IMUL instruction. */
667IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
668{
669 NULL, NULL,
670 iemAImpl_imul_two_u16, NULL,
671 iemAImpl_imul_two_u32, NULL,
672 iemAImpl_imul_two_u64, NULL
673};
674
675/** Group 1 /r lookup table. */
676IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
677{
678 &g_iemAImpl_add,
679 &g_iemAImpl_or,
680 &g_iemAImpl_adc,
681 &g_iemAImpl_sbb,
682 &g_iemAImpl_and,
683 &g_iemAImpl_sub,
684 &g_iemAImpl_xor,
685 &g_iemAImpl_cmp
686};
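
/**
 * Illustrative sketch only: the group 1 table above is indexed by the ModR/M
 * reg field (bits 3 thru 5) for the 0x80..0x83 encodings, e.g.:
 * @code
 *      PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 * @endcode
 */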
687
688/** Function table for the INC instruction. */
689IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
690{
691 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
692 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
693 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
694 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
695};
696
697/** Function table for the DEC instruction. */
698IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
699{
700 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
701 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
702 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
703 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
704};
705
706/** Function table for the NEG instruction. */
707IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
708{
709 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
710 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
711 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
712 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
713};
714
715/** Function table for the NOT instruction. */
716IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
717{
718 iemAImpl_not_u8, iemAImpl_not_u8_locked,
719 iemAImpl_not_u16, iemAImpl_not_u16_locked,
720 iemAImpl_not_u32, iemAImpl_not_u32_locked,
721 iemAImpl_not_u64, iemAImpl_not_u64_locked
722};
723
724
725/** Function table for the ROL instruction. */
726IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
727{
728 iemAImpl_rol_u8,
729 iemAImpl_rol_u16,
730 iemAImpl_rol_u32,
731 iemAImpl_rol_u64
732};
733
734/** Function table for the ROR instruction. */
735IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
736{
737 iemAImpl_ror_u8,
738 iemAImpl_ror_u16,
739 iemAImpl_ror_u32,
740 iemAImpl_ror_u64
741};
742
743/** Function table for the RCL instruction. */
744IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
745{
746 iemAImpl_rcl_u8,
747 iemAImpl_rcl_u16,
748 iemAImpl_rcl_u32,
749 iemAImpl_rcl_u64
750};
751
752/** Function table for the RCR instruction. */
753IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
754{
755 iemAImpl_rcr_u8,
756 iemAImpl_rcr_u16,
757 iemAImpl_rcr_u32,
758 iemAImpl_rcr_u64
759};
760
761/** Function table for the SHL instruction. */
762IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
763{
764 iemAImpl_shl_u8,
765 iemAImpl_shl_u16,
766 iemAImpl_shl_u32,
767 iemAImpl_shl_u64
768};
769
770/** Function table for the SHR instruction. */
771IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
772{
773 iemAImpl_shr_u8,
774 iemAImpl_shr_u16,
775 iemAImpl_shr_u32,
776 iemAImpl_shr_u64
777};
778
779/** Function table for the SAR instruction. */
780IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
781{
782 iemAImpl_sar_u8,
783 iemAImpl_sar_u16,
784 iemAImpl_sar_u32,
785 iemAImpl_sar_u64
786};
787
788
789/** Function table for the MUL instruction. */
790IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
791{
792 iemAImpl_mul_u8,
793 iemAImpl_mul_u16,
794 iemAImpl_mul_u32,
795 iemAImpl_mul_u64
796};
797
798/** Function table for the IMUL instruction working implicitly on rAX. */
799IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
800{
801 iemAImpl_imul_u8,
802 iemAImpl_imul_u16,
803 iemAImpl_imul_u32,
804 iemAImpl_imul_u64
805};
806
807/** Function table for the DIV instruction. */
808IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
809{
810 iemAImpl_div_u8,
811 iemAImpl_div_u16,
812 iemAImpl_div_u32,
813 iemAImpl_div_u64
814};
815
816/** Function table for the IDIV instruction. */
817IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
818{
819 iemAImpl_idiv_u8,
820 iemAImpl_idiv_u16,
821 iemAImpl_idiv_u32,
822 iemAImpl_idiv_u64
823};
824
825/** Function table for the SHLD instruction */
826IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
827{
828 iemAImpl_shld_u16,
829 iemAImpl_shld_u32,
830 iemAImpl_shld_u64,
831};
832
833/** Function table for the SHRD instruction */
834IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
835{
836 iemAImpl_shrd_u16,
837 iemAImpl_shrd_u32,
838 iemAImpl_shrd_u64,
839};
840
841
842/** Function table for the PUNPCKLBW instruction */
843IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
844/** Function table for the PUNPCKLWD instruction */
845IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
846/** Function table for the PUNPCKLDQ instruction */
847IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
848/** Function table for the PUNPCKLQDQ instruction */
849IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
850
851/** Function table for the PUNPCKHBW instruction */
852IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
853/** Function table for the PUNPCKHWD instruction */
854IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
855/** Function table for the PUNPCKHDQ instruction */
856IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
857/** Function table for the PUNPCKHQDQ instruction */
858IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
859
860/** Function table for the PXOR instruction */
861IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
862/** Function table for the PCMPEQB instruction */
863IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
864/** Function table for the PCMPEQW instruction */
865IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
866/** Function table for the PCMPEQD instruction */
867IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
868
869
870#if defined(IEM_LOG_MEMORY_WRITES)
871/** What IEM just wrote. */
872uint8_t g_abIemWrote[256];
873/** How much IEM just wrote. */
874size_t g_cbIemWrote;
875#endif
876
877
878/*********************************************************************************************************************************
879* Internal Functions *
880*********************************************************************************************************************************/
881IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
882IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
883IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
884IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
885/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
886IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
887IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
888IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
889IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
890IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
891IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
892IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
893IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
894IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
895IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
896IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
897IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
898#ifdef IEM_WITH_SETJMP
899DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
900DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
901DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
902DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
903DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
904#endif
905
906IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
907IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
908IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
909IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
910IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
911IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
912IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
913IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
914IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
915IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
916IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
917IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
918IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
919IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
920IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
921IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
922IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
923
924#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
925IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
926IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
927#endif
928
929
930/**
931 * Sets the pass up status.
932 *
933 * @returns VINF_SUCCESS.
934 * @param pVCpu The cross context virtual CPU structure of the
935 * calling thread.
936 * @param rcPassUp The pass up status. Must be informational.
937 * VINF_SUCCESS is not allowed.
938 */
939IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
940{
941 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
942
943 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
944 if (rcOldPassUp == VINF_SUCCESS)
945 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
946 /* If both are EM scheduling codes, use EM priority rules. */
947 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
948 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
949 {
950 if (rcPassUp < rcOldPassUp)
951 {
952 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
953 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
954 }
955 else
956 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
957 }
958 /* Override EM scheduling with specific status code. */
959 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
960 {
961 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
962 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
963 }
964 /* Don't override specific status code, first come first served. */
965 else
966 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
967 return VINF_SUCCESS;
968}
969
970
971/**
972 * Calculates the CPU mode.
973 *
974 * This is mainly for updating IEMCPU::enmCpuMode.
975 *
976 * @returns CPU mode.
977 * @param pVCpu The cross context virtual CPU structure of the
978 * calling thread.
979 */
980DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
981{
982 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
983 return IEMMODE_64BIT;
984 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
985 return IEMMODE_32BIT;
986 return IEMMODE_16BIT;
987}
988
989
990/**
991 * Initializes the execution state.
992 *
993 * @param pVCpu The cross context virtual CPU structure of the
994 * calling thread.
995 * @param fBypassHandlers Whether to bypass access handlers.
996 *
997 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
998 * side-effects in strict builds.
999 */
1000DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1001{
1002 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1003 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1004
1005#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1006 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1007 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1008 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1009 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1010 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1011 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1012 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1013 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1014#endif
1015
1016#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1017 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1018#endif
1019 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1020 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1021#ifdef VBOX_STRICT
1022 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1023 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1024 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1025 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1026 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1027 pVCpu->iem.s.uRexReg = 127;
1028 pVCpu->iem.s.uRexB = 127;
1029 pVCpu->iem.s.offModRm = 127;
1030 pVCpu->iem.s.uRexIndex = 127;
1031 pVCpu->iem.s.iEffSeg = 127;
1032 pVCpu->iem.s.idxPrefix = 127;
1033 pVCpu->iem.s.uVex3rdReg = 127;
1034 pVCpu->iem.s.uVexLength = 127;
1035 pVCpu->iem.s.fEvexStuff = 127;
1036 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1037# ifdef IEM_WITH_CODE_TLB
1038 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1039 pVCpu->iem.s.pbInstrBuf = NULL;
1040 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1041 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1042 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1043 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1044# else
1045 pVCpu->iem.s.offOpcode = 127;
1046 pVCpu->iem.s.cbOpcode = 127;
1047# endif
1048#endif
1049
1050 pVCpu->iem.s.cActiveMappings = 0;
1051 pVCpu->iem.s.iNextMapping = 0;
1052 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1053 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1054#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1055 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1056 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1057 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1058 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1059 if (!pVCpu->iem.s.fInPatchCode)
1060 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1061#endif
1062}
1063
1064#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1065/**
1066 * Performs a minimal reinitialization of the execution state.
1067 *
1068 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1069 * 'world-switch' types operations on the CPU. Currently only nested
1070 * hardware-virtualization uses it.
1071 *
1072 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1073 */
1074IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1075{
1076 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1077 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1078
1079 pVCpu->iem.s.uCpl = uCpl;
1080 pVCpu->iem.s.enmCpuMode = enmMode;
1081 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1082 pVCpu->iem.s.enmEffAddrMode = enmMode;
1083 if (enmMode != IEMMODE_64BIT)
1084 {
1085 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1086 pVCpu->iem.s.enmEffOpSize = enmMode;
1087 }
1088 else
1089 {
1090 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1091 pVCpu->iem.s.enmEffOpSize = enmMode;
1092 }
1093 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1094#ifndef IEM_WITH_CODE_TLB
1095 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1096 pVCpu->iem.s.offOpcode = 0;
1097 pVCpu->iem.s.cbOpcode = 0;
1098#endif
1099 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1100}
1101#endif
1102
1103/**
1104 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1105 *
1106 * @param pVCpu The cross context virtual CPU structure of the
1107 * calling thread.
1108 */
1109DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1110{
1111 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1112#ifdef VBOX_STRICT
1113# ifdef IEM_WITH_CODE_TLB
1114 NOREF(pVCpu);
1115# else
1116 pVCpu->iem.s.cbOpcode = 0;
1117# endif
1118#else
1119 NOREF(pVCpu);
1120#endif
1121}
1122
1123
1124/**
1125 * Initializes the decoder state.
1126 *
1127 * iemReInitDecoder is mostly a copy of this function.
1128 *
1129 * @param pVCpu The cross context virtual CPU structure of the
1130 * calling thread.
1131 * @param fBypassHandlers Whether to bypass access handlers.
1132 */
1133DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1134{
1135 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1136 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1137
1138#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1139 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1140 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1141 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1142 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1143 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1144 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1145 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1146 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1147#endif
1148
1149#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1150 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1151#endif
1152 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1153 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1154 pVCpu->iem.s.enmCpuMode = enmMode;
1155 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1156 pVCpu->iem.s.enmEffAddrMode = enmMode;
1157 if (enmMode != IEMMODE_64BIT)
1158 {
1159 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1160 pVCpu->iem.s.enmEffOpSize = enmMode;
1161 }
1162 else
1163 {
1164 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1165 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1166 }
1167 pVCpu->iem.s.fPrefixes = 0;
1168 pVCpu->iem.s.uRexReg = 0;
1169 pVCpu->iem.s.uRexB = 0;
1170 pVCpu->iem.s.uRexIndex = 0;
1171 pVCpu->iem.s.idxPrefix = 0;
1172 pVCpu->iem.s.uVex3rdReg = 0;
1173 pVCpu->iem.s.uVexLength = 0;
1174 pVCpu->iem.s.fEvexStuff = 0;
1175 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1176#ifdef IEM_WITH_CODE_TLB
1177 pVCpu->iem.s.pbInstrBuf = NULL;
1178 pVCpu->iem.s.offInstrNextByte = 0;
1179 pVCpu->iem.s.offCurInstrStart = 0;
1180# ifdef VBOX_STRICT
1181 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1182 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1183 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1184# endif
1185#else
1186 pVCpu->iem.s.offOpcode = 0;
1187 pVCpu->iem.s.cbOpcode = 0;
1188#endif
1189 pVCpu->iem.s.offModRm = 0;
1190 pVCpu->iem.s.cActiveMappings = 0;
1191 pVCpu->iem.s.iNextMapping = 0;
1192 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1193 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1194#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1195 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1196 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1197 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1198 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1199 if (!pVCpu->iem.s.fInPatchCode)
1200 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1201#endif
1202
1203#ifdef DBGFTRACE_ENABLED
1204 switch (enmMode)
1205 {
1206 case IEMMODE_64BIT:
1207 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1208 break;
1209 case IEMMODE_32BIT:
1210 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1211 break;
1212 case IEMMODE_16BIT:
1213 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1214 break;
1215 }
1216#endif
1217}
1218
1219
1220/**
1221 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1222 *
1223 * This is mostly a copy of iemInitDecoder.
1224 *
1225 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1226 */
1227DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1228{
1229 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1230
1231#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1232 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1233 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1234 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1235 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1236 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1237 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1238 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1239 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1240#endif
1241
1242 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1243 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1244 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1245 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1246 pVCpu->iem.s.enmEffAddrMode = enmMode;
1247 if (enmMode != IEMMODE_64BIT)
1248 {
1249 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1250 pVCpu->iem.s.enmEffOpSize = enmMode;
1251 }
1252 else
1253 {
1254 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1255 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1256 }
1257 pVCpu->iem.s.fPrefixes = 0;
1258 pVCpu->iem.s.uRexReg = 0;
1259 pVCpu->iem.s.uRexB = 0;
1260 pVCpu->iem.s.uRexIndex = 0;
1261 pVCpu->iem.s.idxPrefix = 0;
1262 pVCpu->iem.s.uVex3rdReg = 0;
1263 pVCpu->iem.s.uVexLength = 0;
1264 pVCpu->iem.s.fEvexStuff = 0;
1265 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1266#ifdef IEM_WITH_CODE_TLB
1267 if (pVCpu->iem.s.pbInstrBuf)
1268 {
1269 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1270 - pVCpu->iem.s.uInstrBufPc;
1271 if (off < pVCpu->iem.s.cbInstrBufTotal)
1272 {
1273 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1274 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1275 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1276 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1277 else
1278 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1279 }
1280 else
1281 {
1282 pVCpu->iem.s.pbInstrBuf = NULL;
1283 pVCpu->iem.s.offInstrNextByte = 0;
1284 pVCpu->iem.s.offCurInstrStart = 0;
1285 pVCpu->iem.s.cbInstrBuf = 0;
1286 pVCpu->iem.s.cbInstrBufTotal = 0;
1287 }
1288 }
1289 else
1290 {
1291 pVCpu->iem.s.offInstrNextByte = 0;
1292 pVCpu->iem.s.offCurInstrStart = 0;
1293 pVCpu->iem.s.cbInstrBuf = 0;
1294 pVCpu->iem.s.cbInstrBufTotal = 0;
1295 }
1296#else
1297 pVCpu->iem.s.cbOpcode = 0;
1298 pVCpu->iem.s.offOpcode = 0;
1299#endif
1300 pVCpu->iem.s.offModRm = 0;
1301 Assert(pVCpu->iem.s.cActiveMappings == 0);
1302 pVCpu->iem.s.iNextMapping = 0;
1303 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1304 Assert(pVCpu->iem.s.fBypassHandlers == false);
1305#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1306 if (!pVCpu->iem.s.fInPatchCode)
1307 { /* likely */ }
1308 else
1309 {
1310 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1311 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1312 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1313 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1314 if (!pVCpu->iem.s.fInPatchCode)
1315 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1316 }
1317#endif
1318
1319#ifdef DBGFTRACE_ENABLED
1320 switch (enmMode)
1321 {
1322 case IEMMODE_64BIT:
1323 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1324 break;
1325 case IEMMODE_32BIT:
1326 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1327 break;
1328 case IEMMODE_16BIT:
1329 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1330 break;
1331 }
1332#endif
1333}
1334
1335
1336
1337/**
1338 * Prefetch opcodes the first time when starting executing.
1339 *
1340 * @returns Strict VBox status code.
1341 * @param pVCpu The cross context virtual CPU structure of the
1342 * calling thread.
1343 * @param fBypassHandlers Whether to bypass access handlers.
1344 */
1345IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1346{
1347 iemInitDecoder(pVCpu, fBypassHandlers);
1348
1349#ifdef IEM_WITH_CODE_TLB
1350 /** @todo Do ITLB lookup here. */
1351
1352#else /* !IEM_WITH_CODE_TLB */
1353
1354 /*
1355 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1356 *
1357 * First translate CS:rIP to a physical address.
1358 */
1359 uint32_t cbToTryRead;
1360 RTGCPTR GCPtrPC;
1361 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1362 {
1363 cbToTryRead = PAGE_SIZE;
1364 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1365 if (IEM_IS_CANONICAL(GCPtrPC))
1366 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1367 else
1368 return iemRaiseGeneralProtectionFault0(pVCpu);
1369 }
1370 else
1371 {
1372 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1373 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1374 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1375 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1376 else
1377 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1378 if (cbToTryRead) { /* likely */ }
1379 else /* overflowed */
1380 {
1381 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1382 cbToTryRead = UINT32_MAX;
1383 }
1384 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1385 Assert(GCPtrPC <= UINT32_MAX);
1386 }
1387
1388# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1389 /* Allow interpretation of patch manager code blocks since they can for
1390 instance throw #PFs for perfectly good reasons. */
1391 if (pVCpu->iem.s.fInPatchCode)
1392 {
1393 size_t cbRead = 0;
1394 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1395 AssertRCReturn(rc, rc);
1396 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1397 return VINF_SUCCESS;
1398 }
1399# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1400
1401 RTGCPHYS GCPhys;
1402 uint64_t fFlags;
1403 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1404 if (RT_SUCCESS(rc)) { /* probable */ }
1405 else
1406 {
1407 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1408 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1409 }
1410 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1411 else
1412 {
1413 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1414 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1415 }
1416 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1417 else
1418 {
1419 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1420 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1421 }
1422 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1423 /** @todo Check reserved bits and such stuff. PGM is better at doing
1424 * that, so do it when implementing the guest virtual address
1425 * TLB... */
1426
1427 /*
1428 * Read the bytes at this address.
1429 */
1430 PVM pVM = pVCpu->CTX_SUFF(pVM);
1431# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1432 size_t cbActual;
1433 if ( PATMIsEnabled(pVM)
1434 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1435 {
1436 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1437 Assert(cbActual > 0);
1438 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1439 }
1440 else
1441# endif
1442 {
1443 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1444 if (cbToTryRead > cbLeftOnPage)
1445 cbToTryRead = cbLeftOnPage;
1446 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1447 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1448
1449 if (!pVCpu->iem.s.fBypassHandlers)
1450 {
1451 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1452 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1453 { /* likely */ }
1454 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1455 {
1456 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1457 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1458 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1459 }
1460 else
1461 {
1462 Log((RT_SUCCESS(rcStrict)
1463 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1464 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1465 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1466 return rcStrict;
1467 }
1468 }
1469 else
1470 {
1471 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1472 if (RT_SUCCESS(rc))
1473 { /* likely */ }
1474 else
1475 {
1476 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1477                      GCPtrPC, GCPhys, cbToTryRead, rc));
1478 return rc;
1479 }
1480 }
1481 pVCpu->iem.s.cbOpcode = cbToTryRead;
1482 }
1483#endif /* !IEM_WITH_CODE_TLB */
1484 return VINF_SUCCESS;
1485}
1486
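/*
 * Note: in builds without IEM_WITH_CODE_TLB the routine above leaves the
 * prefetched bytes in pVCpu->iem.s.abOpcode, with pVCpu->iem.s.cbOpcode
 * holding the byte count; the iemOpcodeGetNextXxx helpers further down
 * consume them via pVCpu->iem.s.offOpcode and call iemOpcodeFetchMoreBytes
 * when they run dry.
 */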
1487
1488/**
1489 * Invalidates the IEM TLBs.
1490 *
1491 * This is called internally as well as by PGM when moving GC mappings.
1492 *
1494 * @param pVCpu The cross context virtual CPU structure of the calling
1495 * thread.
1496 * @param fVmm Set when PGM calls us with a remapping.
1497 */
1498VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1499{
1500#ifdef IEM_WITH_CODE_TLB
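    /* Each TLB tag has the current revision OR'ed into it (see the uTag
       construction in the code fetch path below), so bumping the revision
       lazily invalidates every entry; only when the revision counter wraps
       back to zero do we need to scrub the whole array. */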
1501 pVCpu->iem.s.cbInstrBufTotal = 0;
1502 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1503 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1504 { /* very likely */ }
1505 else
1506 {
1507 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1508 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1509 while (i-- > 0)
1510 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1511 }
1512#endif
1513
1514#ifdef IEM_WITH_DATA_TLB
1515 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1516 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1517 { /* very likely */ }
1518 else
1519 {
1520 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1521 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1522 while (i-- > 0)
1523 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1524 }
1525#endif
1526 NOREF(pVCpu); NOREF(fVmm);
1527}
1528
1529
1530/**
1531 * Invalidates a page in the TLBs.
1532 *
1533 * @param pVCpu The cross context virtual CPU structure of the calling
1534 * thread.
1535  * @param   GCPtr       The address of the page to invalidate.
1536 */
1537VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1538{
1539#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1540 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1541 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1542 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1543 uintptr_t idx = (uint8_t)GCPtr;
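    /* The TLBs are direct mapped: the low 8 bits of the page number select
       the entry, while the full page number with the revision OR'ed in is
       what is stored in and compared against uTag. */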
1544
1545# ifdef IEM_WITH_CODE_TLB
1546 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1547 {
1548 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1549 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1550 pVCpu->iem.s.cbInstrBufTotal = 0;
1551 }
1552# endif
1553
1554# ifdef IEM_WITH_DATA_TLB
1555 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1556 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1557# endif
1558#else
1559 NOREF(pVCpu); NOREF(GCPtr);
1560#endif
1561}
1562
1563
1564/**
1565 * Invalidates the host physical aspects of the IEM TLBs.
1566 *
1567 * This is called internally as well as by PGM when moving GC mappings.
1568 *
1569 * @param pVCpu The cross context virtual CPU structure of the calling
1570 * thread.
1571 */
1572VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1573{
1574#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1575     /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1576
1577# ifdef IEM_WITH_CODE_TLB
1578 pVCpu->iem.s.cbInstrBufTotal = 0;
1579# endif
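    /* The physical revision is kept in the IEMTLBE_F_PHYS_REV bits of each
       entry's fFlagsAndPhysRev, so bumping it invalidates all cached host
       mappings in one go; on wrap-around the entries are scrubbed
       explicitly below. */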
1580 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1581 if (uTlbPhysRev != 0)
1582 {
1583 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1584 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1585 }
1586 else
1587 {
1588 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1589 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1590
1591 unsigned i;
1592# ifdef IEM_WITH_CODE_TLB
1593 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1594 while (i-- > 0)
1595 {
1596 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1597 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1598 }
1599# endif
1600# ifdef IEM_WITH_DATA_TLB
1601 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1602 while (i-- > 0)
1603 {
1604 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1605 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1606 }
1607# endif
1608 }
1609#else
1610 NOREF(pVCpu);
1611#endif
1612}
1613
1614
1615/**
1616  * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1617 *
1618 * This is called internally as well as by PGM when moving GC mappings.
1619 *
1620 * @param pVM The cross context VM structure.
1621 *
1622 * @remarks Caller holds the PGM lock.
1623 */
1624VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1625{
1626 RT_NOREF_PV(pVM);
1627}
1628
1629#ifdef IEM_WITH_CODE_TLB
1630
1631/**
1632  * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception
1633  * (via longjmp) on failure.
1634 *
1635 * We end up here for a number of reasons:
1636 * - pbInstrBuf isn't yet initialized.
1637  *    - Advancing beyond the buffer boundary (e.g. cross page).
1638 * - Advancing beyond the CS segment limit.
1639  *    - Fetching from a non-mappable page (e.g. MMIO).
1640 *
1641 * @param pVCpu The cross context virtual CPU structure of the
1642 * calling thread.
1643 * @param pvDst Where to return the bytes.
1644 * @param cbDst Number of bytes to read.
1645 *
1646 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1647 */
1648IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1649{
1650#ifdef IN_RING3
1651 for (;;)
1652 {
1653 Assert(cbDst <= 8);
1654 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1655
1656 /*
1657 * We might have a partial buffer match, deal with that first to make the
1658 * rest simpler. This is the first part of the cross page/buffer case.
1659 */
1660 if (pVCpu->iem.s.pbInstrBuf != NULL)
1661 {
1662 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1663 {
1664 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1665 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1666 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1667
1668 cbDst -= cbCopy;
1669 pvDst = (uint8_t *)pvDst + cbCopy;
1670 offBuf += cbCopy;
1671                 pVCpu->iem.s.offInstrNextByte = offBuf; /* advance past the copied bytes */
1672 }
1673 }
1674
1675 /*
1676 * Check segment limit, figuring how much we're allowed to access at this point.
1677 *
1678 * We will fault immediately if RIP is past the segment limit / in non-canonical
1679 * territory. If we do continue, there are one or more bytes to read before we
1680 * end up in trouble and we need to do that first before faulting.
1681 */
1682 RTGCPTR GCPtrFirst;
1683 uint32_t cbMaxRead;
1684 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1685 {
1686 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1687 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1688 { /* likely */ }
1689 else
1690 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1691 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1692 }
1693 else
1694 {
1695 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1696 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1697 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1698 { /* likely */ }
1699 else
1700 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1701 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1702 if (cbMaxRead != 0)
1703 { /* likely */ }
1704 else
1705 {
1706 /* Overflowed because address is 0 and limit is max. */
1707 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1708 cbMaxRead = X86_PAGE_SIZE;
1709 }
1710 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1711 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1712 if (cbMaxRead2 < cbMaxRead)
1713 cbMaxRead = cbMaxRead2;
1714 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1715 }
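        /* In either case cbMaxRead is now the number of bytes we may read
           from here: limited by the end of the current page and, outside
           64-bit mode, by the CS segment limit as well. */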
1716
1717 /*
1718 * Get the TLB entry for this piece of code.
1719 */
1720 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1721 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1722 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1723 if (pTlbe->uTag == uTag)
1724 {
1725 /* likely when executing lots of code, otherwise unlikely */
1726# ifdef VBOX_WITH_STATISTICS
1727 pVCpu->iem.s.CodeTlb.cTlbHits++;
1728# endif
1729 }
1730 else
1731 {
1732 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1733# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1734 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1735 {
1736 pTlbe->uTag = uTag;
1737 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1738 | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1739 pTlbe->GCPhys = NIL_RTGCPHYS;
1740 pTlbe->pbMappingR3 = NULL;
1741 }
1742 else
1743# endif
1744 {
1745 RTGCPHYS GCPhys;
1746 uint64_t fFlags;
1747 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1748 if (RT_FAILURE(rc))
1749 {
1750 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1751 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1752 }
1753
1754 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
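                /* The expression below relies on IEMTLBE_F_PT_NO_USER,
                   IEMTLBE_F_PT_NO_WRITE and IEMTLBE_F_PT_NO_DIRTY sharing bit
                   positions with X86_PTE_US, X86_PTE_RW and X86_PTE_D, and on
                   IEMTLBE_F_PT_NO_EXEC being bit 0 (asserted above), so the
                   inverted page table flags map straight onto the 'no access'
                   TLB flags. */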
1755 pTlbe->uTag = uTag;
1756 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1757 pTlbe->GCPhys = GCPhys;
1758 pTlbe->pbMappingR3 = NULL;
1759 }
1760 }
1761
1762 /*
1763 * Check TLB page table level access flags.
1764 */
1765 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1766 {
1767 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1768 {
1769 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1770 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1771 }
1772 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1773 {
1774 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1775 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1776 }
1777 }
1778
1779# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1780 /*
1781 * Allow interpretation of patch manager code blocks since they can for
1782 * instance throw #PFs for perfectly good reasons.
1783 */
1784 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1785             { /* not unlikely */ }
1786 else
1787 {
1788             /** @todo This could be optimized a little in ring-3 if we liked. */
1789 size_t cbRead = 0;
1790 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1791 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1792 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1793 return;
1794 }
1795# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1796
1797 /*
1798 * Look up the physical page info if necessary.
1799 */
1800 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1801 { /* not necessary */ }
1802 else
1803 {
1804 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1805 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1806 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1807 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1808 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1809 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1810 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1811 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1812 }
1813
1814# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1815 /*
1816      * Try to do a direct read using the pbMappingR3 pointer.
1817 */
1818 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1819 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1820 {
1821 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1822 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1823 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1824 {
1825 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1826 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1827 }
1828 else
1829 {
1830 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1831 Assert(cbInstr < cbMaxRead);
1832 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1833 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1834 }
1835 if (cbDst <= cbMaxRead)
1836 {
1837 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1838 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1839 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1840 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1841 return;
1842 }
1843 pVCpu->iem.s.pbInstrBuf = NULL;
1844
1845 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1846 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1847 }
1848 else
1849# endif
1850#if 0
1851 /*
1852      * If there is no special read handling, we can read a bit more and
1853      * put it in the prefetch buffer.
1854 */
1855 if ( cbDst < cbMaxRead
1856 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1857 {
1858 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1859 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1860 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1861 { /* likely */ }
1862 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1863 {
1864 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1865                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1866 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1867             AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1868 }
1869 else
1870 {
1871 Log((RT_SUCCESS(rcStrict)
1872 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1873 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1874                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1875 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1876 }
1877 }
1878 /*
1879 * Special read handling, so only read exactly what's needed.
1880 * This is a highly unlikely scenario.
1881 */
1882 else
1883#endif
1884 {
1885 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1886 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1887 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1888 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1889 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1890 { /* likely */ }
1891 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1892 {
1893 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1894                  GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1895 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1896 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1897 }
1898 else
1899 {
1900 Log((RT_SUCCESS(rcStrict)
1901 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1902 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1903                      GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1904 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1905 }
1906 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1907 if (cbToRead == cbDst)
1908 return;
1909 }
1910
1911 /*
1912 * More to read, loop.
1913 */
1914 cbDst -= cbMaxRead;
1915 pvDst = (uint8_t *)pvDst + cbMaxRead;
1916 }
1917#else
1918 RT_NOREF(pvDst, cbDst);
1919 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1920#endif
1921}
1922
1923#else
1924
1925/**
1926  * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1927  * exception if it fails.
1928 *
1929 * @returns Strict VBox status code.
1930 * @param pVCpu The cross context virtual CPU structure of the
1931 * calling thread.
1932  * @param   cbMin           The minimum number of bytes, relative to offOpcode,
1933  *                          that must be read.
1934 */
1935IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1936{
1937 /*
1938 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1939 *
1940 * First translate CS:rIP to a physical address.
1941 */
1942 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1943 uint32_t cbToTryRead;
1944 RTGCPTR GCPtrNext;
1945 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1946 {
1947 cbToTryRead = PAGE_SIZE;
1948 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1949 if (!IEM_IS_CANONICAL(GCPtrNext))
1950 return iemRaiseGeneralProtectionFault0(pVCpu);
1951 }
1952 else
1953 {
1954 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1955 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1956 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1957 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1958 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1959 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1960 if (!cbToTryRead) /* overflowed */
1961 {
1962 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1963 cbToTryRead = UINT32_MAX;
1964 /** @todo check out wrapping around the code segment. */
1965 }
1966 if (cbToTryRead < cbMin - cbLeft)
1967 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1968 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1969 }
1970
1971 /* Only read up to the end of the page, and make sure we don't read more
1972 than the opcode buffer can hold. */
1973 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1974 if (cbToTryRead > cbLeftOnPage)
1975 cbToTryRead = cbLeftOnPage;
1976 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1977 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1978/** @todo r=bird: Convert assertion into undefined opcode exception? */
1979 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1980
1981# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1982 /* Allow interpretation of patch manager code blocks since they can for
1983 instance throw #PFs for perfectly good reasons. */
1984 if (pVCpu->iem.s.fInPatchCode)
1985 {
1986 size_t cbRead = 0;
1987 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1988 AssertRCReturn(rc, rc);
1989 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1990 return VINF_SUCCESS;
1991 }
1992# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1993
1994 RTGCPHYS GCPhys;
1995 uint64_t fFlags;
1996 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1997 if (RT_FAILURE(rc))
1998 {
1999 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2000 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2001 }
2002 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2003 {
2004 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2005 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2006 }
2007 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2008 {
2009 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2010 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2011 }
2012 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2013 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2014 /** @todo Check reserved bits and such stuff. PGM is better at doing
2015 * that, so do it when implementing the guest virtual address
2016 * TLB... */
2017
2018 /*
2019 * Read the bytes at this address.
2020 *
2021 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2022 * and since PATM should only patch the start of an instruction there
2023 * should be no need to check again here.
2024 */
2025 if (!pVCpu->iem.s.fBypassHandlers)
2026 {
2027 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2028 cbToTryRead, PGMACCESSORIGIN_IEM);
2029 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2030 { /* likely */ }
2031 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2032 {
2033 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2034                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2035 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2036 }
2037 else
2038 {
2039 Log((RT_SUCCESS(rcStrict)
2040 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2041 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2042                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2043 return rcStrict;
2044 }
2045 }
2046 else
2047 {
2048 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2049 if (RT_SUCCESS(rc))
2050 { /* likely */ }
2051 else
2052 {
2053 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2054 return rc;
2055 }
2056 }
2057 pVCpu->iem.s.cbOpcode += cbToTryRead;
2058 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2059
2060 return VINF_SUCCESS;
2061}
2062
2063#endif /* !IEM_WITH_CODE_TLB */
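
/*
 * Summary: with IEM_WITH_CODE_TLB defined, iemOpcodeFetchBytesJmp above serves
 * the setjmp/longjmp decoder path and reads through the per-CPU code TLB and
 * the direct pbInstrBuf mapping; without it, iemOpcodeFetchMoreBytes tops up
 * the abOpcode buffer and returns a strict status code, which the *Jmp
 * wrappers below convert into a longjmp when needed.
 */
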
2064#ifndef IEM_WITH_SETJMP
2065
2066/**
2067 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2068 *
2069 * @returns Strict VBox status code.
2070 * @param pVCpu The cross context virtual CPU structure of the
2071 * calling thread.
2072 * @param pb Where to return the opcode byte.
2073 */
2074DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2075{
2076 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2077 if (rcStrict == VINF_SUCCESS)
2078 {
2079 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2080 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2081 pVCpu->iem.s.offOpcode = offOpcode + 1;
2082 }
2083 else
2084 *pb = 0;
2085 return rcStrict;
2086}
2087
2088
2089/**
2090 * Fetches the next opcode byte.
2091 *
2092 * @returns Strict VBox status code.
2093 * @param pVCpu The cross context virtual CPU structure of the
2094 * calling thread.
2095 * @param pu8 Where to return the opcode byte.
2096 */
2097DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2098{
2099 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2100 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2101 {
2102 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2103 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2104 return VINF_SUCCESS;
2105 }
2106 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2107}
2108
2109#else /* IEM_WITH_SETJMP */
2110
2111/**
2112 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2113 *
2114 * @returns The opcode byte.
2115 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2116 */
2117DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2118{
2119# ifdef IEM_WITH_CODE_TLB
2120 uint8_t u8;
2121 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2122 return u8;
2123# else
2124 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2125 if (rcStrict == VINF_SUCCESS)
2126 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2127 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2128# endif
2129}
2130
2131
2132/**
2133 * Fetches the next opcode byte, longjmp on error.
2134 *
2135 * @returns The opcode byte.
2136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2137 */
2138DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2139{
2140# ifdef IEM_WITH_CODE_TLB
2141 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2142 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2143 if (RT_LIKELY( pbBuf != NULL
2144 && offBuf < pVCpu->iem.s.cbInstrBuf))
2145 {
2146 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2147 return pbBuf[offBuf];
2148 }
2149# else
2150 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2151 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2152 {
2153 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2154 return pVCpu->iem.s.abOpcode[offOpcode];
2155 }
2156# endif
2157 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2158}
2159
2160#endif /* IEM_WITH_SETJMP */
2161
2162/**
2163 * Fetches the next opcode byte, returns automatically on failure.
2164 *
2165 * @param a_pu8 Where to return the opcode byte.
2166 * @remark Implicitly references pVCpu.
2167 */
2168#ifndef IEM_WITH_SETJMP
2169# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2170 do \
2171 { \
2172 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2173 if (rcStrict2 == VINF_SUCCESS) \
2174 { /* likely */ } \
2175 else \
2176 return rcStrict2; \
2177 } while (0)
2178#else
2179# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2180#endif /* IEM_WITH_SETJMP */
2181
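/* Typical use of these fetch macros in a decoder function looks roughly like
   the following sketch (illustrative only, not part of this file):

       uint8_t u8Imm;
       IEM_OPCODE_GET_NEXT_U8(&u8Imm);

   In the non-setjmp build the macro does a 'return rcStrict2;' on failure, so
   it can only be used in functions returning VBOXSTRICTRC; in the setjmp
   build it longjmps instead. */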
2182
2183#ifndef IEM_WITH_SETJMP
2184/**
2185 * Fetches the next signed byte from the opcode stream.
2186 *
2187 * @returns Strict VBox status code.
2188 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2189 * @param pi8 Where to return the signed byte.
2190 */
2191DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2192{
2193 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2194}
2195#endif /* !IEM_WITH_SETJMP */
2196
2197
2198/**
2199 * Fetches the next signed byte from the opcode stream, returning automatically
2200 * on failure.
2201 *
2202 * @param a_pi8 Where to return the signed byte.
2203 * @remark Implicitly references pVCpu.
2204 */
2205#ifndef IEM_WITH_SETJMP
2206# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2207 do \
2208 { \
2209 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2210 if (rcStrict2 != VINF_SUCCESS) \
2211 return rcStrict2; \
2212 } while (0)
2213#else /* IEM_WITH_SETJMP */
2214# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2215
2216#endif /* IEM_WITH_SETJMP */
2217
2218#ifndef IEM_WITH_SETJMP
2219
2220/**
2221 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2222 *
2223 * @returns Strict VBox status code.
2224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2225  * @param   pu16                Where to return the opcode word.
2226 */
2227DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2228{
2229 uint8_t u8;
2230 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2231 if (rcStrict == VINF_SUCCESS)
2232 *pu16 = (int8_t)u8;
2233 return rcStrict;
2234}
2235
2236
2237/**
2238 * Fetches the next signed byte from the opcode stream, extending it to
2239 * unsigned 16-bit.
2240 *
2241 * @returns Strict VBox status code.
2242 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2243 * @param pu16 Where to return the unsigned word.
2244 */
2245DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2246{
2247 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2248 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2249 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2250
2251 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2252 pVCpu->iem.s.offOpcode = offOpcode + 1;
2253 return VINF_SUCCESS;
2254}
2255
2256#endif /* !IEM_WITH_SETJMP */
2257
2258/**
2259  * Fetches the next signed byte from the opcode stream, sign-extending it to
2260  * a word, returning automatically on failure.
2261 *
2262 * @param a_pu16 Where to return the word.
2263 * @remark Implicitly references pVCpu.
2264 */
2265#ifndef IEM_WITH_SETJMP
2266# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2267 do \
2268 { \
2269 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2270 if (rcStrict2 != VINF_SUCCESS) \
2271 return rcStrict2; \
2272 } while (0)
2273#else
2274# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2275#endif
2276
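/* The S8_SX_* fetchers return the sign-extended byte in an unsigned variable
   of the target width, presumably so that 8-bit displacements and immediates
   can be added to 16/32/64-bit values with plain unsigned arithmetic and
   still give the expected two's complement result. */
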
2277#ifndef IEM_WITH_SETJMP
2278
2279/**
2280 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2281 *
2282 * @returns Strict VBox status code.
2283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2284 * @param pu32 Where to return the opcode dword.
2285 */
2286DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2287{
2288 uint8_t u8;
2289 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2290 if (rcStrict == VINF_SUCCESS)
2291 *pu32 = (int8_t)u8;
2292 return rcStrict;
2293}
2294
2295
2296/**
2297 * Fetches the next signed byte from the opcode stream, extending it to
2298 * unsigned 32-bit.
2299 *
2300 * @returns Strict VBox status code.
2301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2302 * @param pu32 Where to return the unsigned dword.
2303 */
2304DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2305{
2306 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2307 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2308 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2309
2310 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2311 pVCpu->iem.s.offOpcode = offOpcode + 1;
2312 return VINF_SUCCESS;
2313}
2314
2315#endif /* !IEM_WITH_SETJMP */
2316
2317/**
2318  * Fetches the next signed byte from the opcode stream, sign-extending it to
2319  * a double word, returning automatically on failure.
2320  *
2321  * @param   a_pu32              Where to return the double word.
2322 * @remark Implicitly references pVCpu.
2323 */
2324#ifndef IEM_WITH_SETJMP
2325 # define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2326 do \
2327 { \
2328 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2329 if (rcStrict2 != VINF_SUCCESS) \
2330 return rcStrict2; \
2331 } while (0)
2332#else
2333# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2334#endif
2335
2336#ifndef IEM_WITH_SETJMP
2337
2338/**
2339 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2340 *
2341 * @returns Strict VBox status code.
2342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2343 * @param pu64 Where to return the opcode qword.
2344 */
2345DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2346{
2347 uint8_t u8;
2348 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2349 if (rcStrict == VINF_SUCCESS)
2350 *pu64 = (int8_t)u8;
2351 return rcStrict;
2352}
2353
2354
2355/**
2356 * Fetches the next signed byte from the opcode stream, extending it to
2357 * unsigned 64-bit.
2358 *
2359 * @returns Strict VBox status code.
2360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2361 * @param pu64 Where to return the unsigned qword.
2362 */
2363DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2364{
2365 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2366 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2367 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2368
2369 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2370 pVCpu->iem.s.offOpcode = offOpcode + 1;
2371 return VINF_SUCCESS;
2372}
2373
2374#endif /* !IEM_WITH_SETJMP */
2375
2376
2377/**
2378  * Fetches the next signed byte from the opcode stream, sign-extending it to
2379  * a quad word, returning automatically on failure.
2380  *
2381  * @param   a_pu64              Where to return the quad word.
2382 * @remark Implicitly references pVCpu.
2383 */
2384#ifndef IEM_WITH_SETJMP
2385# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2386 do \
2387 { \
2388 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2389 if (rcStrict2 != VINF_SUCCESS) \
2390 return rcStrict2; \
2391 } while (0)
2392#else
2393# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2394#endif
2395
2396
2397#ifndef IEM_WITH_SETJMP
2398/**
2399  * Fetches the next opcode byte, recording its offset as that of the ModR/M byte.
2400 *
2401 * @returns Strict VBox status code.
2402 * @param pVCpu The cross context virtual CPU structure of the
2403 * calling thread.
2404 * @param pu8 Where to return the opcode byte.
2405 */
2406DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2407{
2408 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2409 pVCpu->iem.s.offModRm = offOpcode;
2410 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2411 {
2412 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2413 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2414 return VINF_SUCCESS;
2415 }
2416 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2417}
2418#else /* IEM_WITH_SETJMP */
2419/**
2420  * Fetches the next opcode byte, recording its offset as that of the ModR/M byte; longjmp on error.
2421 *
2422 * @returns The opcode byte.
2423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2424 */
2425DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2426{
2427# ifdef IEM_WITH_CODE_TLB
2428 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2429 pVCpu->iem.s.offModRm = offBuf;
2430 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2431 if (RT_LIKELY( pbBuf != NULL
2432 && offBuf < pVCpu->iem.s.cbInstrBuf))
2433 {
2434 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2435 return pbBuf[offBuf];
2436 }
2437# else
2438 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2439 pVCpu->iem.s.offModRm = offOpcode;
2440 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2441 {
2442 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2443 return pVCpu->iem.s.abOpcode[offOpcode];
2444 }
2445# endif
2446 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2447}
2448#endif /* IEM_WITH_SETJMP */
2449
2450/**
2451 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2452 * on failure.
2453 *
2454 * Will note down the position of the ModR/M byte for VT-x exits.
2455 *
2456 * @param a_pbRm Where to return the RM opcode byte.
2457 * @remark Implicitly references pVCpu.
2458 */
2459#ifndef IEM_WITH_SETJMP
2460# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2461 do \
2462 { \
2463 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2464 if (rcStrict2 == VINF_SUCCESS) \
2465 { /* likely */ } \
2466 else \
2467 return rcStrict2; \
2468 } while (0)
2469#else
2470# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2471#endif /* IEM_WITH_SETJMP */
2472
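/* A decoder for an instruction with a ModR/M byte would typically start out
   roughly like this (illustrative sketch only):

       uint8_t bRm;
       IEM_OPCODE_GET_NEXT_RM(&bRm);

   Besides fetching the byte, this records its offset in offModRm so that the
   VT-x exit handling can locate the ModR/M byte without re-decoding. */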
2473
2474#ifndef IEM_WITH_SETJMP
2475
2476/**
2477 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2478 *
2479 * @returns Strict VBox status code.
2480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2481 * @param pu16 Where to return the opcode word.
2482 */
2483DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2484{
2485 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2486 if (rcStrict == VINF_SUCCESS)
2487 {
2488 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2489# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2490 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2491# else
2492 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2493# endif
2494 pVCpu->iem.s.offOpcode = offOpcode + 2;
2495 }
2496 else
2497 *pu16 = 0;
2498 return rcStrict;
2499}
2500
2501
2502/**
2503 * Fetches the next opcode word.
2504 *
2505 * @returns Strict VBox status code.
2506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2507 * @param pu16 Where to return the opcode word.
2508 */
2509DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2510{
2511 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2512 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2513 {
2514 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2515# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2516 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2517# else
2518 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2519# endif
2520 return VINF_SUCCESS;
2521 }
2522 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2523}
2524
2525#else /* IEM_WITH_SETJMP */
2526
2527/**
2528  * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2529 *
2530 * @returns The opcode word.
2531 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2532 */
2533DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2534{
2535# ifdef IEM_WITH_CODE_TLB
2536 uint16_t u16;
2537 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2538 return u16;
2539# else
2540 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2541 if (rcStrict == VINF_SUCCESS)
2542 {
2543 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2544 pVCpu->iem.s.offOpcode += 2;
2545# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2546 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2547# else
2548 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2549# endif
2550 }
2551 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2552# endif
2553}
2554
2555
2556/**
2557 * Fetches the next opcode word, longjmp on error.
2558 *
2559 * @returns The opcode word.
2560 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2561 */
2562DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2563{
2564# ifdef IEM_WITH_CODE_TLB
2565 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2566 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2567 if (RT_LIKELY( pbBuf != NULL
2568 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2569 {
2570 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2571# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2572 return *(uint16_t const *)&pbBuf[offBuf];
2573# else
2574 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2575# endif
2576 }
2577# else
2578 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2579 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2580 {
2581 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2582# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2583 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2584# else
2585 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2586# endif
2587 }
2588# endif
2589 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2590}
2591
2592#endif /* IEM_WITH_SETJMP */
2593
2594
2595/**
2596 * Fetches the next opcode word, returns automatically on failure.
2597 *
2598 * @param a_pu16 Where to return the opcode word.
2599 * @remark Implicitly references pVCpu.
2600 */
2601#ifndef IEM_WITH_SETJMP
2602# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2603 do \
2604 { \
2605 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2606 if (rcStrict2 != VINF_SUCCESS) \
2607 return rcStrict2; \
2608 } while (0)
2609#else
2610# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2611#endif
2612
2613#ifndef IEM_WITH_SETJMP
2614
2615/**
2616 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2617 *
2618 * @returns Strict VBox status code.
2619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2620 * @param pu32 Where to return the opcode double word.
2621 */
2622DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2623{
2624 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2625 if (rcStrict == VINF_SUCCESS)
2626 {
2627 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2628 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2629 pVCpu->iem.s.offOpcode = offOpcode + 2;
2630 }
2631 else
2632 *pu32 = 0;
2633 return rcStrict;
2634}
2635
2636
2637/**
2638 * Fetches the next opcode word, zero extending it to a double word.
2639 *
2640 * @returns Strict VBox status code.
2641 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2642 * @param pu32 Where to return the opcode double word.
2643 */
2644DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2645{
2646 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2647 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2648 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2649
2650 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2651 pVCpu->iem.s.offOpcode = offOpcode + 2;
2652 return VINF_SUCCESS;
2653}
2654
2655#endif /* !IEM_WITH_SETJMP */
2656
2657
2658/**
2659 * Fetches the next opcode word and zero extends it to a double word, returns
2660 * automatically on failure.
2661 *
2662 * @param a_pu32 Where to return the opcode double word.
2663 * @remark Implicitly references pVCpu.
2664 */
2665#ifndef IEM_WITH_SETJMP
2666# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2667 do \
2668 { \
2669 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2670 if (rcStrict2 != VINF_SUCCESS) \
2671 return rcStrict2; \
2672 } while (0)
2673#else
2674# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2675#endif
2676
2677#ifndef IEM_WITH_SETJMP
2678
2679/**
2680 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2681 *
2682 * @returns Strict VBox status code.
2683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2684 * @param pu64 Where to return the opcode quad word.
2685 */
2686DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2687{
2688 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2689 if (rcStrict == VINF_SUCCESS)
2690 {
2691 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2692 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2693 pVCpu->iem.s.offOpcode = offOpcode + 2;
2694 }
2695 else
2696 *pu64 = 0;
2697 return rcStrict;
2698}
2699
2700
2701/**
2702 * Fetches the next opcode word, zero extending it to a quad word.
2703 *
2704 * @returns Strict VBox status code.
2705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2706 * @param pu64 Where to return the opcode quad word.
2707 */
2708DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2709{
2710 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2711 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2712 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2713
2714 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2715 pVCpu->iem.s.offOpcode = offOpcode + 2;
2716 return VINF_SUCCESS;
2717}
2718
2719#endif /* !IEM_WITH_SETJMP */
2720
2721/**
2722 * Fetches the next opcode word and zero extends it to a quad word, returns
2723 * automatically on failure.
2724 *
2725 * @param a_pu64 Where to return the opcode quad word.
2726 * @remark Implicitly references pVCpu.
2727 */
2728#ifndef IEM_WITH_SETJMP
2729# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2730 do \
2731 { \
2732 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2733 if (rcStrict2 != VINF_SUCCESS) \
2734 return rcStrict2; \
2735 } while (0)
2736#else
2737# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2738#endif
2739
2740
2741#ifndef IEM_WITH_SETJMP
2742/**
2743 * Fetches the next signed word from the opcode stream.
2744 *
2745 * @returns Strict VBox status code.
2746 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2747 * @param pi16 Where to return the signed word.
2748 */
2749DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2750{
2751 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2752}
2753#endif /* !IEM_WITH_SETJMP */
2754
2755
2756/**
2757 * Fetches the next signed word from the opcode stream, returning automatically
2758 * on failure.
2759 *
2760 * @param a_pi16 Where to return the signed word.
2761 * @remark Implicitly references pVCpu.
2762 */
2763#ifndef IEM_WITH_SETJMP
2764# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2765 do \
2766 { \
2767 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2768 if (rcStrict2 != VINF_SUCCESS) \
2769 return rcStrict2; \
2770 } while (0)
2771#else
2772# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2773#endif
2774
2775#ifndef IEM_WITH_SETJMP
2776
2777/**
2778 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2779 *
2780 * @returns Strict VBox status code.
2781 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2782 * @param pu32 Where to return the opcode dword.
2783 */
2784DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2785{
2786 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2787 if (rcStrict == VINF_SUCCESS)
2788 {
2789 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2790# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2791 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2792# else
2793 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2794 pVCpu->iem.s.abOpcode[offOpcode + 1],
2795 pVCpu->iem.s.abOpcode[offOpcode + 2],
2796 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2797# endif
2798 pVCpu->iem.s.offOpcode = offOpcode + 4;
2799 }
2800 else
2801 *pu32 = 0;
2802 return rcStrict;
2803}
2804
2805
2806/**
2807 * Fetches the next opcode dword.
2808 *
2809 * @returns Strict VBox status code.
2810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2811 * @param pu32 Where to return the opcode double word.
2812 */
2813DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2814{
2815 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2816 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2817 {
2818 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2819# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2820 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2821# else
2822 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2823 pVCpu->iem.s.abOpcode[offOpcode + 1],
2824 pVCpu->iem.s.abOpcode[offOpcode + 2],
2825 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2826# endif
2827 return VINF_SUCCESS;
2828 }
2829 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2830}
2831
2832 #else  /* IEM_WITH_SETJMP */
2833
2834/**
2835 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2836 *
2837 * @returns The opcode dword.
2838 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2839 */
2840DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2841{
2842# ifdef IEM_WITH_CODE_TLB
2843 uint32_t u32;
2844 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2845 return u32;
2846# else
2847 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2848 if (rcStrict == VINF_SUCCESS)
2849 {
2850 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2851 pVCpu->iem.s.offOpcode = offOpcode + 4;
2852# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2853 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2854# else
2855 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2856 pVCpu->iem.s.abOpcode[offOpcode + 1],
2857 pVCpu->iem.s.abOpcode[offOpcode + 2],
2858 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2859# endif
2860 }
2861 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2862# endif
2863}
2864
2865
2866/**
2867 * Fetches the next opcode dword, longjmp on error.
2868 *
2869 * @returns The opcode dword.
2870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2871 */
2872DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2873{
2874# ifdef IEM_WITH_CODE_TLB
2875 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2876 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2877 if (RT_LIKELY( pbBuf != NULL
2878 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2879 {
2880 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2881# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2882 return *(uint32_t const *)&pbBuf[offBuf];
2883# else
2884 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2885 pbBuf[offBuf + 1],
2886 pbBuf[offBuf + 2],
2887 pbBuf[offBuf + 3]);
2888# endif
2889 }
2890# else
2891 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2892 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2893 {
2894 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2895# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2896 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2897# else
2898 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2899 pVCpu->iem.s.abOpcode[offOpcode + 1],
2900 pVCpu->iem.s.abOpcode[offOpcode + 2],
2901 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2902# endif
2903 }
2904# endif
2905 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2906}
2907
2908#endif /* !IEM_WITH_SETJMP */
2909
2910
2911/**
2912 * Fetches the next opcode dword, returns automatically on failure.
2913 *
2914 * @param a_pu32 Where to return the opcode dword.
2915 * @remark Implicitly references pVCpu.
2916 */
2917#ifndef IEM_WITH_SETJMP
2918# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2919 do \
2920 { \
2921 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2922 if (rcStrict2 != VINF_SUCCESS) \
2923 return rcStrict2; \
2924 } while (0)
2925#else
2926# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2927#endif
2928
2929#ifndef IEM_WITH_SETJMP
2930
2931/**
2932 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2933 *
2934 * @returns Strict VBox status code.
2935 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2936 * @param pu64 Where to return the opcode dword.
2937 */
2938DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2939{
2940 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2941 if (rcStrict == VINF_SUCCESS)
2942 {
2943 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2944 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2945 pVCpu->iem.s.abOpcode[offOpcode + 1],
2946 pVCpu->iem.s.abOpcode[offOpcode + 2],
2947 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2948 pVCpu->iem.s.offOpcode = offOpcode + 4;
2949 }
2950 else
2951 *pu64 = 0;
2952 return rcStrict;
2953}
2954
2955
2956/**
2957 * Fetches the next opcode dword, zero extending it to a quad word.
2958 *
2959 * @returns Strict VBox status code.
2960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2961 * @param pu64 Where to return the opcode quad word.
2962 */
2963DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2964{
2965 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2966 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2967 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2968
2969 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2970 pVCpu->iem.s.abOpcode[offOpcode + 1],
2971 pVCpu->iem.s.abOpcode[offOpcode + 2],
2972 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2973 pVCpu->iem.s.offOpcode = offOpcode + 4;
2974 return VINF_SUCCESS;
2975}
2976
2977#endif /* !IEM_WITH_SETJMP */
2978
2979
2980/**
2981 * Fetches the next opcode dword and zero extends it to a quad word, returns
2982 * automatically on failure.
2983 *
2984 * @param a_pu64 Where to return the opcode quad word.
2985 * @remark Implicitly references pVCpu.
2986 */
2987#ifndef IEM_WITH_SETJMP
2988# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2989 do \
2990 { \
2991 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2992 if (rcStrict2 != VINF_SUCCESS) \
2993 return rcStrict2; \
2994 } while (0)
2995#else
2996# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2997#endif
2998
2999
3000#ifndef IEM_WITH_SETJMP
3001/**
3002 * Fetches the next signed double word from the opcode stream.
3003 *
3004 * @returns Strict VBox status code.
3005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3006 * @param pi32 Where to return the signed double word.
3007 */
3008DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3009{
3010 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3011}
3012#endif
3013
3014/**
3015 * Fetches the next signed double word from the opcode stream, returning
3016 * automatically on failure.
3017 *
3018 * @param a_pi32 Where to return the signed double word.
3019 * @remark Implicitly references pVCpu.
3020 */
3021#ifndef IEM_WITH_SETJMP
3022# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3023 do \
3024 { \
3025 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3026 if (rcStrict2 != VINF_SUCCESS) \
3027 return rcStrict2; \
3028 } while (0)
3029#else
3030# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3031#endif
3032
3033#ifndef IEM_WITH_SETJMP
3034
3035/**
3036 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3037 *
3038 * @returns Strict VBox status code.
3039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3040 * @param pu64 Where to return the opcode qword.
3041 */
3042DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3043{
3044 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3045 if (rcStrict == VINF_SUCCESS)
3046 {
3047 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3048 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3049 pVCpu->iem.s.abOpcode[offOpcode + 1],
3050 pVCpu->iem.s.abOpcode[offOpcode + 2],
3051 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3052 pVCpu->iem.s.offOpcode = offOpcode + 4;
3053 }
3054 else
3055 *pu64 = 0;
3056 return rcStrict;
3057}
3058
3059
3060/**
3061 * Fetches the next opcode dword, sign extending it into a quad word.
3062 *
3063 * @returns Strict VBox status code.
3064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3065 * @param pu64 Where to return the opcode quad word.
3066 */
3067DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3068{
3069 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3070 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3071 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3072
3073 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3074 pVCpu->iem.s.abOpcode[offOpcode + 1],
3075 pVCpu->iem.s.abOpcode[offOpcode + 2],
3076 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3077 *pu64 = i32;
3078 pVCpu->iem.s.offOpcode = offOpcode + 4;
3079 return VINF_SUCCESS;
3080}
3081
3082#endif /* !IEM_WITH_SETJMP */
3083
3084
3085/**
3086 * Fetches the next opcode double word and sign extends it to a quad word,
3087 * returns automatically on failure.
3088 *
3089 * @param a_pu64 Where to return the opcode quad word.
3090 * @remark Implicitly references pVCpu.
3091 */
3092#ifndef IEM_WITH_SETJMP
3093# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3094 do \
3095 { \
3096 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3097 if (rcStrict2 != VINF_SUCCESS) \
3098 return rcStrict2; \
3099 } while (0)
3100#else
3101# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3102#endif
3103
3104#ifndef IEM_WITH_SETJMP
3105
3106/**
3107 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3108 *
3109 * @returns Strict VBox status code.
3110 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3111 * @param pu64 Where to return the opcode qword.
3112 */
3113DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3114{
3115 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3116 if (rcStrict == VINF_SUCCESS)
3117 {
3118 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3119# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3120 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3121# else
3122 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3123 pVCpu->iem.s.abOpcode[offOpcode + 1],
3124 pVCpu->iem.s.abOpcode[offOpcode + 2],
3125 pVCpu->iem.s.abOpcode[offOpcode + 3],
3126 pVCpu->iem.s.abOpcode[offOpcode + 4],
3127 pVCpu->iem.s.abOpcode[offOpcode + 5],
3128 pVCpu->iem.s.abOpcode[offOpcode + 6],
3129 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3130# endif
3131 pVCpu->iem.s.offOpcode = offOpcode + 8;
3132 }
3133 else
3134 *pu64 = 0;
3135 return rcStrict;
3136}
3137
3138
3139/**
3140 * Fetches the next opcode qword.
3141 *
3142 * @returns Strict VBox status code.
3143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3144 * @param pu64 Where to return the opcode qword.
3145 */
3146DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3147{
3148 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3149 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3150 {
3151# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3152 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3153# else
3154 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3155 pVCpu->iem.s.abOpcode[offOpcode + 1],
3156 pVCpu->iem.s.abOpcode[offOpcode + 2],
3157 pVCpu->iem.s.abOpcode[offOpcode + 3],
3158 pVCpu->iem.s.abOpcode[offOpcode + 4],
3159 pVCpu->iem.s.abOpcode[offOpcode + 5],
3160 pVCpu->iem.s.abOpcode[offOpcode + 6],
3161 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3162# endif
3163 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3164 return VINF_SUCCESS;
3165 }
3166 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3167}
3168
3169#else /* IEM_WITH_SETJMP */
3170
3171/**
3172 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3173 *
3174 * @returns The opcode qword.
3175 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3176 */
3177DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3178{
3179# ifdef IEM_WITH_CODE_TLB
3180 uint64_t u64;
3181 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3182 return u64;
3183# else
3184 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3185 if (rcStrict == VINF_SUCCESS)
3186 {
3187 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3188 pVCpu->iem.s.offOpcode = offOpcode + 8;
3189# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3190 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3191# else
3192 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3193 pVCpu->iem.s.abOpcode[offOpcode + 1],
3194 pVCpu->iem.s.abOpcode[offOpcode + 2],
3195 pVCpu->iem.s.abOpcode[offOpcode + 3],
3196 pVCpu->iem.s.abOpcode[offOpcode + 4],
3197 pVCpu->iem.s.abOpcode[offOpcode + 5],
3198 pVCpu->iem.s.abOpcode[offOpcode + 6],
3199 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3200# endif
3201 }
3202 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3203# endif
3204}
3205
3206
3207/**
3208 * Fetches the next opcode qword, longjmp on error.
3209 *
3210 * @returns The opcode qword.
3211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3212 */
3213DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3214{
3215# ifdef IEM_WITH_CODE_TLB
3216 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3217 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3218 if (RT_LIKELY( pbBuf != NULL
3219 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3220 {
3221 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3222# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3223 return *(uint64_t const *)&pbBuf[offBuf];
3224# else
3225 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3226 pbBuf[offBuf + 1],
3227 pbBuf[offBuf + 2],
3228 pbBuf[offBuf + 3],
3229 pbBuf[offBuf + 4],
3230 pbBuf[offBuf + 5],
3231 pbBuf[offBuf + 6],
3232 pbBuf[offBuf + 7]);
3233# endif
3234 }
3235# else
3236 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3237 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3238 {
3239 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3240# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3241 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3242# else
3243 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3244 pVCpu->iem.s.abOpcode[offOpcode + 1],
3245 pVCpu->iem.s.abOpcode[offOpcode + 2],
3246 pVCpu->iem.s.abOpcode[offOpcode + 3],
3247 pVCpu->iem.s.abOpcode[offOpcode + 4],
3248 pVCpu->iem.s.abOpcode[offOpcode + 5],
3249 pVCpu->iem.s.abOpcode[offOpcode + 6],
3250 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3251# endif
3252 }
3253# endif
3254 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3255}
3256
3257#endif /* IEM_WITH_SETJMP */
3258
3259/**
3260 * Fetches the next opcode quad word, returns automatically on failure.
3261 *
3262 * @param a_pu64 Where to return the opcode quad word.
3263 * @remark Implicitly references pVCpu.
3264 */
3265#ifndef IEM_WITH_SETJMP
3266# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3267 do \
3268 { \
3269 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3270 if (rcStrict2 != VINF_SUCCESS) \
3271 return rcStrict2; \
3272 } while (0)
3273#else
3274# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3275#endif
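
/*
 * Illustrative usage sketch (added note, not from the original source): the
 * qword fetch follows the same pattern as the other IEM_OPCODE_GET_NEXT_XXX
 * macros.  In the non-setjmp build a failure propagates through the hidden
 * 'return rcStrict2', in the setjmp build it longjmps, so the caller never
 * sees a partially fetched value (u64Imm is a hypothetical local):
 *
 *      uint64_t u64Imm;
 *      IEM_OPCODE_GET_NEXT_U64(&u64Imm);
 */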
3276
3277
3278/** @name Misc Worker Functions.
3279 * @{
3280 */
3281
3282/**
3283 * Gets the exception class for the specified exception vector.
3284 *
3285 * @returns The class of the specified exception.
3286 * @param uVector The exception vector.
3287 */
3288IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3289{
3290 Assert(uVector <= X86_XCPT_LAST);
3291 switch (uVector)
3292 {
3293 case X86_XCPT_DE:
3294 case X86_XCPT_TS:
3295 case X86_XCPT_NP:
3296 case X86_XCPT_SS:
3297 case X86_XCPT_GP:
3298 case X86_XCPT_SX: /* AMD only */
3299 return IEMXCPTCLASS_CONTRIBUTORY;
3300
3301 case X86_XCPT_PF:
3302 case X86_XCPT_VE: /* Intel only */
3303 return IEMXCPTCLASS_PAGE_FAULT;
3304
3305 case X86_XCPT_DF:
3306 return IEMXCPTCLASS_DOUBLE_FAULT;
3307 }
3308 return IEMXCPTCLASS_BENIGN;
3309}
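
/*
 * Reader aid (summary added here, matching the code below and the Intel SDM
 * double fault conditions table): with the first event's class on the left
 * and the second exception's class on the right,
 *
 *      benign        + anything                  -> deliver second exception
 *      contributory  + contributory              -> #DF
 *      page fault    + page fault / contributory -> #DF
 *      double fault  + page fault / contributory -> triple fault (shutdown)
 *
 * (with a few special cases such as #AC during #AC delivery).  The actual
 * combination logic lives in IEMEvaluateRecursiveXcpt below.
 */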
3310
3311
3312/**
3313 * Evaluates how to handle an exception caused during delivery of another event
3314 * (exception / interrupt).
3315 *
3316 * @returns How to handle the recursive exception.
3317 * @param pVCpu The cross context virtual CPU structure of the
3318 * calling thread.
3319 * @param fPrevFlags The flags of the previous event.
3320 * @param uPrevVector The vector of the previous event.
3321 * @param fCurFlags The flags of the current exception.
3322 * @param uCurVector The vector of the current exception.
3323 * @param pfXcptRaiseInfo Where to store additional information about the
3324 * exception condition. Optional.
3325 */
3326VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3327 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3328{
3329 /*
3330 * Only CPU exceptions can be raised while delivering other events; exceptions generated
3331 * by software interrupts (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
3332 */
3333 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3334 Assert(pVCpu); RT_NOREF(pVCpu);
3335 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3336
3337 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3338 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3339 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3340 {
3341 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3342 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3343 {
3344 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3345 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3346 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3347 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3348 {
3349 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3350 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3351 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3352 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3353 uCurVector, pVCpu->cpum.GstCtx.cr2));
3354 }
3355 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3356 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3357 {
3358 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3359 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3360 }
3361 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3362 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3363 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3364 {
3365 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3366 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3367 }
3368 }
3369 else
3370 {
3371 if (uPrevVector == X86_XCPT_NMI)
3372 {
3373 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3374 if (uCurVector == X86_XCPT_PF)
3375 {
3376 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3377 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3378 }
3379 }
3380 else if ( uPrevVector == X86_XCPT_AC
3381 && uCurVector == X86_XCPT_AC)
3382 {
3383 enmRaise = IEMXCPTRAISE_CPU_HANG;
3384 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3385 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3386 }
3387 }
3388 }
3389 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3390 {
3391 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3392 if (uCurVector == X86_XCPT_PF)
3393 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3394 }
3395 else
3396 {
3397 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3398 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3399 }
3400
3401 if (pfXcptRaiseInfo)
3402 *pfXcptRaiseInfo = fRaiseInfo;
3403 return enmRaise;
3404}
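
/*
 * Illustrative caller sketch (an assumption about typical use, not code lifted
 * from this file; fPrevFlags, uPrevVector and uCurVector are hypothetical
 * locals of the event delivery path):
 *
 *      IEMXCPTRAISEINFO fRaiseInfo;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevFlags, uPrevVector,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, uCurVector,
 *                                                       &fRaiseInfo);
 *      if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
 *          ... raise #DF instead of the current exception ...
 *      else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
 *          ... enter shutdown, see iemInitiateCpuShutdown() below ...
 *      else
 *          ... IEMXCPTRAISE_CURRENT_XCPT: deliver the current exception ...
 */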
3405
3406
3407/**
3408 * Enters the CPU shutdown state initiated by a triple fault or other
3409 * unrecoverable conditions.
3410 *
3411 * @returns Strict VBox status code.
3412 * @param pVCpu The cross context virtual CPU structure of the
3413 * calling thread.
3414 */
3415IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3416{
3417 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3418 {
3419 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3420 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3421 }
3422
3423 RT_NOREF(pVCpu);
3424 return VINF_EM_TRIPLE_FAULT;
3425}
3426
3427
3428/**
3429 * Validates a new SS segment.
3430 *
3431 * @returns VBox strict status code.
3432 * @param pVCpu The cross context virtual CPU structure of the
3433 * calling thread.
3434 * @param NewSS The new SS selector.
3435 * @param uCpl The CPL to load the stack for.
3436 * @param pDesc Where to return the descriptor.
3437 */
3438IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3439{
3440 /* Null selectors are not allowed (we're not called for dispatching
3441 interrupts with SS=0 in long mode). */
3442 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3443 {
3444 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3445 return iemRaiseTaskSwitchFault0(pVCpu);
3446 }
3447
3448 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3449 if ((NewSS & X86_SEL_RPL) != uCpl)
3450 {
3451 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3452 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3453 }
3454
3455 /*
3456 * Read the descriptor.
3457 */
3458 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3459 if (rcStrict != VINF_SUCCESS)
3460 return rcStrict;
3461
3462 /*
3463 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3464 */
3465 if (!pDesc->Legacy.Gen.u1DescType)
3466 {
3467 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3468 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3469 }
3470
3471 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3472 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3473 {
3474 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3475 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3476 }
3477 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3478 {
3479 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3480 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3481 }
3482
3483 /* Is it there? */
3484 /** @todo testcase: Is this checked before the canonical / limit check below? */
3485 if (!pDesc->Legacy.Gen.u1Present)
3486 {
3487 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3488 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3489 }
3490
3491 return VINF_SUCCESS;
3492}
3493
3494
3495/**
3496 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3497 * not.
3498 *
3499 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3500 */
3501#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3502# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3503#else
3504# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3505#endif
3506
3507/**
3508 * Updates the EFLAGS in the correct manner wrt. PATM.
3509 *
3510 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3511 * @param a_fEfl The new EFLAGS.
3512 */
3513#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3514# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3515#else
3516# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3517#endif
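
/*
 * Added note: these two macros are intended to be used as a pair so that flag
 * bits PATM may keep elsewhere are handled correctly in raw mode.  A minimal
 * read-modify-write sketch (the TF masking is just an example):
 *
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 *      fEfl &= ~X86_EFL_TF;
 *      IEMMISC_SET_EFL(pVCpu, fEfl);
 */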
3518
3519
3520/** @} */
3521
3522/** @name Raising Exceptions.
3523 *
3524 * @{
3525 */
3526
3527
3528/**
3529 * Loads the specified stack far pointer from the TSS.
3530 *
3531 * @returns VBox strict status code.
3532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3533 * @param uCpl The CPL to load the stack for.
3534 * @param pSelSS Where to return the new stack segment.
3535 * @param puEsp Where to return the new stack pointer.
3536 */
3537IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3538{
3539 VBOXSTRICTRC rcStrict;
3540 Assert(uCpl < 4);
3541
3542 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3543 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3544 {
3545 /*
3546 * 16-bit TSS (X86TSS16).
3547 */
3548 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3549 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3550 {
3551 uint32_t off = uCpl * 4 + 2;
3552 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3553 {
3554 /** @todo check actual access pattern here. */
3555 uint32_t u32Tmp = 0; /* gcc maybe... */
3556 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3557 if (rcStrict == VINF_SUCCESS)
3558 {
3559 *puEsp = RT_LOWORD(u32Tmp);
3560 *pSelSS = RT_HIWORD(u32Tmp);
3561 return VINF_SUCCESS;
3562 }
3563 }
3564 else
3565 {
3566 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3567 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3568 }
3569 break;
3570 }
3571
3572 /*
3573 * 32-bit TSS (X86TSS32).
3574 */
3575 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3576 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3577 {
3578 uint32_t off = uCpl * 8 + 4;
3579 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3580 {
3581/** @todo check actual access pattern here. */
3582 uint64_t u64Tmp;
3583 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3584 if (rcStrict == VINF_SUCCESS)
3585 {
3586 *puEsp = u64Tmp & UINT32_MAX;
3587 *pSelSS = (RTSEL)(u64Tmp >> 32);
3588 return VINF_SUCCESS;
3589 }
3590 }
3591 else
3592 {
3593                 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3594 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3595 }
3596 break;
3597 }
3598
3599 default:
3600 AssertFailed();
3601 rcStrict = VERR_IEM_IPE_4;
3602 break;
3603 }
3604
3605 *puEsp = 0; /* make gcc happy */
3606 *pSelSS = 0; /* make gcc happy */
3607 return rcStrict;
3608}
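
/*
 * Reader aid for the offset math above (architectural TSS layout per the
 * Intel SDM):
 *      16-bit TSS: SP0/SS0 start at offset 2 with 4 bytes per privilege
 *                  level, hence off = uCpl * 4 + 2.
 *      32-bit TSS: ESP0 starts at offset 4 (SS0 at offset 8) with 8 bytes
 *                  per privilege level, hence off = uCpl * 8 + 4.
 */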
3609
3610
3611/**
3612 * Loads the specified stack pointer from the 64-bit TSS.
3613 *
3614 * @returns VBox strict status code.
3615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3616 * @param uCpl The CPL to load the stack for.
3617 * @param uIst The interrupt stack table index; 0 means use uCpl instead.
3618 * @param puRsp Where to return the new stack pointer.
3619 */
3620IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3621{
3622 Assert(uCpl < 4);
3623 Assert(uIst < 8);
3624 *puRsp = 0; /* make gcc happy */
3625
3626 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3627 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3628
3629 uint32_t off;
3630 if (uIst)
3631 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3632 else
3633 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3634 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3635 {
3636 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3637 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3638 }
3639
3640 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3641}
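
/*
 * Reader aid for the offset math above: in the 64-bit TSS the RSP0..RSP2
 * fields start at offset 4 and IST1..IST7 at offset 0x24, each entry being
 * 8 bytes wide, which is what the RT_UOFFSETOF(X86TSS64, ...) expressions
 * resolve to (architectural layout per the Intel SDM 64-bit TSS format).
 */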
3642
3643
3644/**
3645 * Adjust the CPU state according to the exception being raised.
3646 *
3647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3648 * @param u8Vector The exception that has been raised.
3649 */
3650DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3651{
3652 switch (u8Vector)
3653 {
3654 case X86_XCPT_DB:
3655 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3656 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3657 break;
3658 /** @todo Read the AMD and Intel exception reference... */
3659 }
3660}
3661
3662
3663/**
3664 * Implements exceptions and interrupts for real mode.
3665 *
3666 * @returns VBox strict status code.
3667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3668 * @param cbInstr The number of bytes to offset rIP by in the return
3669 * address.
3670 * @param u8Vector The interrupt / exception vector number.
3671 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3672 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3673 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3674 */
3675IEM_STATIC VBOXSTRICTRC
3676iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3677 uint8_t cbInstr,
3678 uint8_t u8Vector,
3679 uint32_t fFlags,
3680 uint16_t uErr,
3681 uint64_t uCr2)
3682{
3683 NOREF(uErr); NOREF(uCr2);
3684 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3685
3686 /*
3687 * Read the IDT entry.
3688 */
3689 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3690 {
3691 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3692 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3693 }
3694 RTFAR16 Idte;
3695 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3696 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3697 {
3698 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3699 return rcStrict;
3700 }
3701
3702 /*
3703 * Push the stack frame.
3704 */
3705 uint16_t *pu16Frame;
3706 uint64_t uNewRsp;
3707 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3708 if (rcStrict != VINF_SUCCESS)
3709 return rcStrict;
3710
3711 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3712#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3713 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3714 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3715 fEfl |= UINT16_C(0xf000);
3716#endif
3717 pu16Frame[2] = (uint16_t)fEfl;
3718 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3719 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3720 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3721 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3722 return rcStrict;
3723
3724 /*
3725 * Load the vector address into cs:ip and make exception specific state
3726 * adjustments.
3727 */
3728 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3729 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3730 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3731 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3732 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3733 pVCpu->cpum.GstCtx.rip = Idte.off;
3734 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3735 IEMMISC_SET_EFL(pVCpu, fEfl);
3736
3737 /** @todo do we actually do this in real mode? */
3738 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3739 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3740
3741 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3742}
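
/*
 * Reader aid (summarizing the code above, no new behaviour): in real mode the
 * vector is looked up in the IVT at IDTR.base + vector * 4 as an off:sel pair
 * and the 6-byte frame pushed on the stack ends up laid out as
 *
 *      SS:SP+4  FLAGS
 *      SS:SP+2  CS   (return CS)
 *      SS:SP+0  IP   (return IP, advanced past the instruction for software ints)
 */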
3743
3744
3745/**
3746 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3747 *
3748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3749 * @param pSReg Pointer to the segment register.
3750 */
3751IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3752{
3753 pSReg->Sel = 0;
3754 pSReg->ValidSel = 0;
3755 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3756 {
3757         /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3758 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3759 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3760 }
3761 else
3762 {
3763 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3764 /** @todo check this on AMD-V */
3765 pSReg->u64Base = 0;
3766 pSReg->u32Limit = 0;
3767 }
3768}
3769
3770
3771/**
3772 * Loads a segment selector during a task switch in V8086 mode.
3773 *
3774 * @param pSReg Pointer to the segment register.
3775 * @param uSel The selector value to load.
3776 */
3777IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3778{
3779 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3780 pSReg->Sel = uSel;
3781 pSReg->ValidSel = uSel;
3782 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3783 pSReg->u64Base = uSel << 4;
3784 pSReg->u32Limit = 0xffff;
3785 pSReg->Attr.u = 0xf3;
3786}
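
/*
 * Added note on the hard-coded attribute value above: 0xf3 decodes to a
 * present, DPL=3, read/write, accessed data segment, i.e. the fixed attribute
 * set that V8086-style segments are given per the VMX guest segment checks
 * referenced in the comment.
 */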
3787
3788
3789/**
3790 * Loads a NULL data selector into a selector register, both the hidden and
3791 * visible parts, in protected mode.
3792 *
3793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3794 * @param pSReg Pointer to the segment register.
3795 * @param uRpl The RPL.
3796 */
3797IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3798{
3799    /** @todo Testcase: write a testcase checking what happens when loading a NULL
3800 * data selector in protected mode. */
3801 pSReg->Sel = uRpl;
3802 pSReg->ValidSel = uRpl;
3803 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3804 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3805 {
3806 /* VT-x (Intel 3960x) observed doing something like this. */
3807 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3808 pSReg->u32Limit = UINT32_MAX;
3809 pSReg->u64Base = 0;
3810 }
3811 else
3812 {
3813 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3814 pSReg->u32Limit = 0;
3815 pSReg->u64Base = 0;
3816 }
3817}
3818
3819
3820/**
3821 * Loads a segment selector during a task switch in protected mode.
3822 *
3823 * In this task switch scenario, we would throw \#TS exceptions rather than
3824 * \#GPs.
3825 *
3826 * @returns VBox strict status code.
3827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3828 * @param pSReg Pointer to the segment register.
3829 * @param uSel The new selector value.
3830 *
3831 * @remarks This does _not_ handle CS or SS.
3832 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3833 */
3834IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3835{
3836 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3837
3838 /* Null data selector. */
3839 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3840 {
3841 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3842 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3843 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3844 return VINF_SUCCESS;
3845 }
3846
3847 /* Fetch the descriptor. */
3848 IEMSELDESC Desc;
3849 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3850 if (rcStrict != VINF_SUCCESS)
3851 {
3852 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3853 VBOXSTRICTRC_VAL(rcStrict)));
3854 return rcStrict;
3855 }
3856
3857 /* Must be a data segment or readable code segment. */
3858 if ( !Desc.Legacy.Gen.u1DescType
3859 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3860 {
3861 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3862 Desc.Legacy.Gen.u4Type));
3863 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3864 }
3865
3866 /* Check privileges for data segments and non-conforming code segments. */
3867 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3868 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3869 {
3870 /* The RPL and the new CPL must be less than or equal to the DPL. */
3871 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3872 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3873 {
3874 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3875 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3876 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3877 }
3878 }
3879
3880 /* Is it there? */
3881 if (!Desc.Legacy.Gen.u1Present)
3882 {
3883 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3884 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3885 }
3886
3887 /* The base and limit. */
3888 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3889 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3890
3891 /*
3892 * Ok, everything checked out fine. Now set the accessed bit before
3893 * committing the result into the registers.
3894 */
3895 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3896 {
3897 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3898 if (rcStrict != VINF_SUCCESS)
3899 return rcStrict;
3900 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3901 }
3902
3903 /* Commit */
3904 pSReg->Sel = uSel;
3905 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3906 pSReg->u32Limit = cbLimit;
3907 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3908 pSReg->ValidSel = uSel;
3909 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3910 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3911 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3912
3913 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3914 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3915 return VINF_SUCCESS;
3916}
3917
3918
3919/**
3920 * Performs a task switch.
3921 *
3922 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3923 * caller is responsible for performing the necessary checks (like DPL, TSS
3924 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3925 * reference for JMP, CALL, IRET.
3926 *
3927 * If the task switch is due to a software interrupt or hardware exception,
3928 * the caller is responsible for validating the TSS selector and descriptor. See
3929 * Intel Instruction reference for INT n.
3930 *
3931 * @returns VBox strict status code.
3932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3933 * @param enmTaskSwitch What caused this task switch.
3934 * @param uNextEip The EIP effective after the task switch.
3935 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3936 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3937 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3938 * @param SelTSS The TSS selector of the new task.
3939 * @param pNewDescTSS Pointer to the new TSS descriptor.
3940 */
3941IEM_STATIC VBOXSTRICTRC
3942iemTaskSwitch(PVMCPU pVCpu,
3943 IEMTASKSWITCH enmTaskSwitch,
3944 uint32_t uNextEip,
3945 uint32_t fFlags,
3946 uint16_t uErr,
3947 uint64_t uCr2,
3948 RTSEL SelTSS,
3949 PIEMSELDESC pNewDescTSS)
3950{
3951 Assert(!IEM_IS_REAL_MODE(pVCpu));
3952 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3953 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3954
3955 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3956 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3957 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3958 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3959 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3960
3961 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3962 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3963
3964 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3965 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3966
3967 /* Update CR2 in case it's a page-fault. */
3968 /** @todo This should probably be done much earlier in IEM/PGM. See
3969 * @bugref{5653#c49}. */
3970 if (fFlags & IEM_XCPT_FLAGS_CR2)
3971 pVCpu->cpum.GstCtx.cr2 = uCr2;
3972
3973 /*
3974 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3975 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3976 */
3977 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3978 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3979 if (uNewTSSLimit < uNewTSSLimitMin)
3980 {
3981 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3982 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3983 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3984 }
3985
3986 /*
3987 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3988 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3989 */
3990 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3991 {
3992 uint32_t const uExitInfo1 = SelTSS;
3993 uint32_t uExitInfo2 = uErr;
3994 switch (enmTaskSwitch)
3995 {
3996 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3997 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3998 default: break;
3999 }
4000 if (fFlags & IEM_XCPT_FLAGS_ERR)
4001 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4002 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4003 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4004
4005 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4006 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4007 RT_NOREF2(uExitInfo1, uExitInfo2);
4008 }
4009 /** @todo Nested-VMX task-switch intercept. */
4010
4011 /*
4012 * Check the current TSS limit. The last data written to the current TSS during the
4013 * task switch is the 2 bytes at offset 0x5C (32-bit TSS) and at offset 0x28 (16-bit TSS).
4014 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4015 *
4016 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4017 * end up with smaller than "legal" TSS limits.
4018 */
4019 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4020 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4021 if (uCurTSSLimit < uCurTSSLimitMin)
4022 {
4023 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4024 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4025 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4026 }
4027
4028 /*
4029 * Verify that the new TSS can be accessed and map it. Map only the required contents
4030 * and not the entire TSS.
4031 */
4032 void *pvNewTSS;
4033 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4034 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4035 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4036 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4037 * not perform correct translation if this happens. See Intel spec. 7.2.1
4038 * "Task-State Segment" */
4039 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4040 if (rcStrict != VINF_SUCCESS)
4041 {
4042 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4043 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4044 return rcStrict;
4045 }
4046
4047 /*
4048 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4049 */
4050 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4051 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4052 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4053 {
4054 PX86DESC pDescCurTSS;
4055 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4056 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4057 if (rcStrict != VINF_SUCCESS)
4058 {
4059 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4060 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4061 return rcStrict;
4062 }
4063
4064 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4065 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4066 if (rcStrict != VINF_SUCCESS)
4067 {
4068 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4069 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4070 return rcStrict;
4071 }
4072
4073 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4074 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4075 {
4076 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4077 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4078 u32EFlags &= ~X86_EFL_NT;
4079 }
4080 }
4081
4082 /*
4083 * Save the CPU state into the current TSS.
4084 */
4085 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4086 if (GCPtrNewTSS == GCPtrCurTSS)
4087 {
4088 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4089 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4090 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ldtr.Sel));
4091 }
4092 if (fIsNewTSS386)
4093 {
4094 /*
4095 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4096 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4097 */
4098 void *pvCurTSS32;
4099 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4100 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4101 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4102 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4103 if (rcStrict != VINF_SUCCESS)
4104 {
4105 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4106 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4107 return rcStrict;
4108 }
4109
4110         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4111 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4112 pCurTSS32->eip = uNextEip;
4113 pCurTSS32->eflags = u32EFlags;
4114 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4115 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4116 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4117 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4118 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4119 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4120 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4121 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4122 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4123 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4124 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4125 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4126 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4127 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4128
4129 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4130 if (rcStrict != VINF_SUCCESS)
4131 {
4132 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4133 VBOXSTRICTRC_VAL(rcStrict)));
4134 return rcStrict;
4135 }
4136 }
4137 else
4138 {
4139 /*
4140 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4141 */
4142 void *pvCurTSS16;
4143 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4144 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4145 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4146 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4147 if (rcStrict != VINF_SUCCESS)
4148 {
4149 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4150 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4151 return rcStrict;
4152 }
4153
4154         /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS+cbCurTSS). */
4155 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4156 pCurTSS16->ip = uNextEip;
4157 pCurTSS16->flags = u32EFlags;
4158 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4159 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4160 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4161 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4162 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4163 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4164 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4165 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4166 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4167 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4168 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4169 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4170
4171 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4172 if (rcStrict != VINF_SUCCESS)
4173 {
4174 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4175 VBOXSTRICTRC_VAL(rcStrict)));
4176 return rcStrict;
4177 }
4178 }
4179
4180 /*
4181 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4182 */
4183 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4184 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4185 {
4186 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4187 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4188 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4189 }
4190
4191 /*
4192 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4193 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4194 */
4195 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4196 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4197 bool fNewDebugTrap;
4198 if (fIsNewTSS386)
4199 {
4200 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4201 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4202 uNewEip = pNewTSS32->eip;
4203 uNewEflags = pNewTSS32->eflags;
4204 uNewEax = pNewTSS32->eax;
4205 uNewEcx = pNewTSS32->ecx;
4206 uNewEdx = pNewTSS32->edx;
4207 uNewEbx = pNewTSS32->ebx;
4208 uNewEsp = pNewTSS32->esp;
4209 uNewEbp = pNewTSS32->ebp;
4210 uNewEsi = pNewTSS32->esi;
4211 uNewEdi = pNewTSS32->edi;
4212 uNewES = pNewTSS32->es;
4213 uNewCS = pNewTSS32->cs;
4214 uNewSS = pNewTSS32->ss;
4215 uNewDS = pNewTSS32->ds;
4216 uNewFS = pNewTSS32->fs;
4217 uNewGS = pNewTSS32->gs;
4218 uNewLdt = pNewTSS32->selLdt;
4219 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4220 }
4221 else
4222 {
4223 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4224 uNewCr3 = 0;
4225 uNewEip = pNewTSS16->ip;
4226 uNewEflags = pNewTSS16->flags;
4227 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4228 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4229 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4230 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4231 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4232 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4233 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4234 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4235 uNewES = pNewTSS16->es;
4236 uNewCS = pNewTSS16->cs;
4237 uNewSS = pNewTSS16->ss;
4238 uNewDS = pNewTSS16->ds;
4239 uNewFS = 0;
4240 uNewGS = 0;
4241 uNewLdt = pNewTSS16->selLdt;
4242 fNewDebugTrap = false;
4243 }
4244
4245 if (GCPtrNewTSS == GCPtrCurTSS)
4246 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4247 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4248
4249 /*
4250 * We're done accessing the new TSS.
4251 */
4252 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4253 if (rcStrict != VINF_SUCCESS)
4254 {
4255 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4256 return rcStrict;
4257 }
4258
4259 /*
4260 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4261 */
4262 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4263 {
4264 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4265 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4266 if (rcStrict != VINF_SUCCESS)
4267 {
4268 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4269 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4270 return rcStrict;
4271 }
4272
4273 /* Check that the descriptor indicates the new TSS is available (not busy). */
4274 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4275 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4276 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4277
4278 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4279 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4280 if (rcStrict != VINF_SUCCESS)
4281 {
4282 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4283 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4284 return rcStrict;
4285 }
4286 }
4287
4288 /*
4289 * From this point on, we're technically in the new task. We will defer exceptions
4290 * until the completion of the task switch but before executing any instructions in the new task.
4291 */
4292 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4293 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4294 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4295 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4296 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4297 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4298 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4299
4300 /* Set the busy bit in TR. */
4301 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4302 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4303 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4304 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4305 {
4306 uNewEflags |= X86_EFL_NT;
4307 }
4308
4309 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4310 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4311 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4312
4313 pVCpu->cpum.GstCtx.eip = uNewEip;
4314 pVCpu->cpum.GstCtx.eax = uNewEax;
4315 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4316 pVCpu->cpum.GstCtx.edx = uNewEdx;
4317 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4318 pVCpu->cpum.GstCtx.esp = uNewEsp;
4319 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4320 pVCpu->cpum.GstCtx.esi = uNewEsi;
4321 pVCpu->cpum.GstCtx.edi = uNewEdi;
4322
4323 uNewEflags &= X86_EFL_LIVE_MASK;
4324 uNewEflags |= X86_EFL_RA1_MASK;
4325 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4326
4327 /*
4328 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4329 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4330 * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
4331 */
4332 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4333 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4334
4335 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4336 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4337
4338 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4339 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4340
4341 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4342 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4343
4344 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4345 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4346
4347 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4348 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4349 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4350
4351 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4352 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4353 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4354 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4355
4356 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4357 {
4358 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4359 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4360 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4361 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4362 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4363 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4364 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4365 }
4366
4367 /*
4368 * Switch CR3 for the new task.
4369 */
4370 if ( fIsNewTSS386
4371 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4372 {
4373 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4374 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4375 AssertRCSuccessReturn(rc, rc);
4376
4377 /* Inform PGM. */
4378 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4379 AssertRCReturn(rc, rc);
4380 /* ignore informational status codes */
4381
4382 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4383 }
4384
4385 /*
4386 * Switch LDTR for the new task.
4387 */
4388 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4389 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4390 else
4391 {
4392 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4393
4394 IEMSELDESC DescNewLdt;
4395 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4396 if (rcStrict != VINF_SUCCESS)
4397 {
4398 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4399 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4400 return rcStrict;
4401 }
4402 if ( !DescNewLdt.Legacy.Gen.u1Present
4403 || DescNewLdt.Legacy.Gen.u1DescType
4404 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4405 {
4406 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4407 uNewLdt, DescNewLdt.Legacy.u));
4408 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4409 }
4410
4411 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4412 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4413 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4414 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4415 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4416 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4417 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4418 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4419 }
4420
4421 IEMSELDESC DescSS;
4422 if (IEM_IS_V86_MODE(pVCpu))
4423 {
4424 pVCpu->iem.s.uCpl = 3;
4425 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4426 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4427 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4428 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4429 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4430 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4431
4432 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4433 DescSS.Legacy.u = 0;
4434 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4435 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4436 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4437 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4438 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4439 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4440 DescSS.Legacy.Gen.u2Dpl = 3;
4441 }
4442 else
4443 {
4444 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4445
4446 /*
4447 * Load the stack segment for the new task.
4448 */
4449 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4450 {
4451 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4452 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4453 }
4454
4455 /* Fetch the descriptor. */
4456 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4457 if (rcStrict != VINF_SUCCESS)
4458 {
4459 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4460 VBOXSTRICTRC_VAL(rcStrict)));
4461 return rcStrict;
4462 }
4463
4464 /* SS must be a data segment and writable. */
4465 if ( !DescSS.Legacy.Gen.u1DescType
4466 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4467 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4468 {
4469 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4470 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4471 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4472 }
4473
4474 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4475 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4476 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4477 {
4478 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4479 uNewCpl));
4480 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4481 }
4482
4483 /* Is it there? */
4484 if (!DescSS.Legacy.Gen.u1Present)
4485 {
4486 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4487 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4488 }
4489
4490 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4491 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4492
4493 /* Set the accessed bit before committing the result into SS. */
4494 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4495 {
4496 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4497 if (rcStrict != VINF_SUCCESS)
4498 return rcStrict;
4499 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4500 }
4501
4502 /* Commit SS. */
4503 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4504 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4505 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4506 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4507 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4508 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4509 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4510
4511 /* CPL has changed, update IEM before loading rest of segments. */
4512 pVCpu->iem.s.uCpl = uNewCpl;
4513
4514 /*
4515 * Load the data segments for the new task.
4516 */
4517 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4518 if (rcStrict != VINF_SUCCESS)
4519 return rcStrict;
4520 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4521 if (rcStrict != VINF_SUCCESS)
4522 return rcStrict;
4523 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4524 if (rcStrict != VINF_SUCCESS)
4525 return rcStrict;
4526 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4527 if (rcStrict != VINF_SUCCESS)
4528 return rcStrict;
4529
4530 /*
4531 * Load the code segment for the new task.
4532 */
4533 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4534 {
4535 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4536 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4537 }
4538
4539 /* Fetch the descriptor. */
4540 IEMSELDESC DescCS;
4541 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4542 if (rcStrict != VINF_SUCCESS)
4543 {
4544 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4545 return rcStrict;
4546 }
4547
4548 /* CS must be a code segment. */
4549 if ( !DescCS.Legacy.Gen.u1DescType
4550 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4551 {
4552 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4553 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4554 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4555 }
4556
4557 /* For conforming CS, DPL must be less than or equal to the RPL. */
4558 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4559 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4560 {
4561             Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4562 DescCS.Legacy.Gen.u2Dpl));
4563 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4564 }
4565
4566 /* For non-conforming CS, DPL must match RPL. */
4567 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4568 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4569 {
4570             Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4571 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4572 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4573 }
4574
4575 /* Is it there? */
4576 if (!DescCS.Legacy.Gen.u1Present)
4577 {
4578 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4579 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4580 }
4581
4582 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4583 u64Base = X86DESC_BASE(&DescCS.Legacy);
4584
4585 /* Set the accessed bit before committing the result into CS. */
4586 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4587 {
4588 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4589 if (rcStrict != VINF_SUCCESS)
4590 return rcStrict;
4591 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4592 }
4593
4594 /* Commit CS. */
4595 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4596 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4597 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4598 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4599 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4600 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4601 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4602 }
4603
4604 /** @todo Debug trap. */
4605 if (fIsNewTSS386 && fNewDebugTrap)
4606 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4607
4608 /*
4609 * Construct the error code masks based on what caused this task switch.
4610 * See Intel Instruction reference for INT.
4611 */
4612 uint16_t uExt;
4613 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4614 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4615 {
4616 uExt = 1;
4617 }
4618 else
4619 uExt = 0;
4620
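/* For reference, a brief sketch of the selector error code format (per the Intel SDM
 * "Error Code" description): EXT lives in bit 0, IDT in bit 1, TI in bit 2 and the
 * selector index in bits 3..15.  Compositions such as the ones used in this file are
 * therefore roughly (uSel being a placeholder for whichever selector is reported):
 * @code
 *     uint16_t const uErrCd = (uSel & X86_SEL_MASK_OFF_RPL) | uExt; // keep index+TI, merge in EXT
 * @endcode
 */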
4621 /*
4622 * Push any error code on to the new stack.
4623 */
4624 if (fFlags & IEM_XCPT_FLAGS_ERR)
4625 {
4626 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4627 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4628 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4629
4630 /* Check that there is sufficient space on the stack. */
4631 /** @todo Factor out segment limit checking for normal/expand down segments
4632 * into a separate function. */
4633 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4634 {
4635 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4636 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4637 {
4638 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4639 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4640 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4641 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4642 }
4643 }
4644 else
4645 {
4646 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4647 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4648 {
4649 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4650 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4651 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4652 }
4653 }
4654
4655
4656 if (fIsNewTSS386)
4657 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4658 else
4659 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4660 if (rcStrict != VINF_SUCCESS)
4661 {
4662 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4663 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4664 return rcStrict;
4665 }
4666 }
4667
4668 /* Check the new EIP against the new CS limit. */
4669 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4670 {
4671 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4672 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4673 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4674 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4675 }
4676
4677 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.ss.Sel));
4678 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4679}
4680
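/* A minimal sketch relating to the @todo in iemTaskSwitch above about factoring the
 * normal/expand-down segment limit check out into a helper.  The name and exact shape
 * are hypothetical, not existing IEM API; shown only to illustrate the intent:
 * @code
 * DECLINLINE(bool) iemHlpIsStackFrameWithinLimit(IEMSELDESC const *pDescSS, uint32_t uEsp, uint8_t cbFrame)
 * {
 *     uint32_t const cbLimit = X86DESC_LIMIT_G(&pDescSS->Legacy);
 *     if (!(pDescSS->Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
 *         return uEsp - 1 <= cbLimit && uEsp >= cbFrame;                        // normal segment
 *     uint32_t const uMax = pDescSS->Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff);
 *     return uEsp - 1 <= uMax && uEsp - cbFrame >= cbLimit + UINT32_C(1);       // expand-down segment
 * }
 * @endcode
 */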
4681
4682/**
4683 * Implements exceptions and interrupts for protected mode.
4684 *
4685 * @returns VBox strict status code.
4686 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4687 * @param cbInstr The number of bytes to offset rIP by in the return
4688 * address.
4689 * @param u8Vector The interrupt / exception vector number.
4690 * @param fFlags The flags.
4691 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4692 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4693 */
4694IEM_STATIC VBOXSTRICTRC
4695iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4696 uint8_t cbInstr,
4697 uint8_t u8Vector,
4698 uint32_t fFlags,
4699 uint16_t uErr,
4700 uint64_t uCr2)
4701{
4702 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4703
4704 /*
4705 * Read the IDT entry.
4706 */
4707 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4708 {
4709 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4710 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4711 }
4712 X86DESC Idte;
4713 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4714 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4715 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4716 {
4717 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4718 return rcStrict;
4719 }
4720 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4721 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4722 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4723
4724 /*
4725 * Check the descriptor type, DPL and such.
4726 * ASSUMES this is done in the same order as described for call-gate calls.
4727 */
4728 if (Idte.Gate.u1DescType)
4729 {
4730 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4731 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4732 }
4733 bool fTaskGate = false;
4734 uint8_t f32BitGate = true;
4735 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4736 switch (Idte.Gate.u4Type)
4737 {
4738 case X86_SEL_TYPE_SYS_UNDEFINED:
4739 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4740 case X86_SEL_TYPE_SYS_LDT:
4741 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4742 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4743 case X86_SEL_TYPE_SYS_UNDEFINED2:
4744 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4745 case X86_SEL_TYPE_SYS_UNDEFINED3:
4746 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4747 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4748 case X86_SEL_TYPE_SYS_UNDEFINED4:
4749 {
4750 /** @todo check what actually happens when the type is wrong...
4751 * esp. call gates. */
4752 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4753 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4754 }
4755
4756 case X86_SEL_TYPE_SYS_286_INT_GATE:
4757 f32BitGate = false;
4758 RT_FALL_THRU();
4759 case X86_SEL_TYPE_SYS_386_INT_GATE:
4760 fEflToClear |= X86_EFL_IF;
4761 break;
4762
4763 case X86_SEL_TYPE_SYS_TASK_GATE:
4764 fTaskGate = true;
4765#ifndef IEM_IMPLEMENTS_TASKSWITCH
4766 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4767#endif
4768 break;
4769
4770 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4771 f32BitGate = false; RT_FALL_THRU();
4772 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4773 break;
4774
4775 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4776 }
4777
4778 /* Check DPL against CPL if applicable. */
4779 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4780 {
4781 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4782 {
4783 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4784 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4785 }
4786 }
4787
4788 /* Is it there? */
4789 if (!Idte.Gate.u1Present)
4790 {
4791 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4792 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4793 }
4794
4795 /* Is it a task-gate? */
4796 if (fTaskGate)
4797 {
4798 /*
4799 * Construct the error code masks based on what caused this task switch.
4800 * See Intel Instruction reference for INT.
4801 */
4802 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4803 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4804 RTSEL SelTSS = Idte.Gate.u16Sel;
4805
4806 /*
4807 * Fetch the TSS descriptor in the GDT.
4808 */
4809 IEMSELDESC DescTSS;
4810 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4811 if (rcStrict != VINF_SUCCESS)
4812 {
4813 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4814 VBOXSTRICTRC_VAL(rcStrict)));
4815 return rcStrict;
4816 }
4817
4818 /* The TSS descriptor must be a system segment and be available (not busy). */
4819 if ( DescTSS.Legacy.Gen.u1DescType
4820 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4821 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4822 {
4823 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4824 u8Vector, SelTSS, DescTSS.Legacy.au64));
4825 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4826 }
4827
4828 /* The TSS must be present. */
4829 if (!DescTSS.Legacy.Gen.u1Present)
4830 {
4831 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4832 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4833 }
4834
4835 /* Do the actual task switch. */
4836 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT, pVCpu->cpum.GstCtx.eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4837 }
4838
4839 /* A null CS is bad. */
4840 RTSEL NewCS = Idte.Gate.u16Sel;
4841 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4842 {
4843 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4844 return iemRaiseGeneralProtectionFault0(pVCpu);
4845 }
4846
4847 /* Fetch the descriptor for the new CS. */
4848 IEMSELDESC DescCS;
4849 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4850 if (rcStrict != VINF_SUCCESS)
4851 {
4852 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4853 return rcStrict;
4854 }
4855
4856 /* Must be a code segment. */
4857 if (!DescCS.Legacy.Gen.u1DescType)
4858 {
4859 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4860 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4861 }
4862 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4863 {
4864 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4865 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4866 }
4867
4868 /* Don't allow lowering the privilege level. */
4869 /** @todo Does the lowering of privileges apply to software interrupts
4870 * only? This has bearings on the more-privileged or
4871 * same-privilege stack behavior further down. A testcase would
4872 * be nice. */
4873 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4874 {
4875 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4876 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4877 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4878 }
4879
4880 /* Make sure the selector is present. */
4881 if (!DescCS.Legacy.Gen.u1Present)
4882 {
4883 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4884 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4885 }
4886
4887 /* Check the new EIP against the new CS limit. */
4888 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4889 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4890 ? Idte.Gate.u16OffsetLow
4891 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4892 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4893 if (uNewEip > cbLimitCS)
4894 {
4895 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4896 u8Vector, uNewEip, cbLimitCS, NewCS));
4897 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4898 }
4899 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4900
4901 /* Calc the flag image to push. */
4902 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4903 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4904 fEfl &= ~X86_EFL_RF;
4905 else
4906 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4907
4908 /* From V8086 mode only go to CPL 0. */
4909 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4910 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4911 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4912 {
4913 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4914 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4915 }
4916
4917 /*
4918 * If the privilege level changes, we need to get a new stack from the TSS.
4919 * This in turns means validating the new SS and ESP...
4920 */
4921 if (uNewCpl != pVCpu->iem.s.uCpl)
4922 {
4923 RTSEL NewSS;
4924 uint32_t uNewEsp;
4925 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4926 if (rcStrict != VINF_SUCCESS)
4927 return rcStrict;
4928
4929 IEMSELDESC DescSS;
4930 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4931 if (rcStrict != VINF_SUCCESS)
4932 return rcStrict;
4933 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4934 if (!DescSS.Legacy.Gen.u1DefBig)
4935 {
4936 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4937 uNewEsp = (uint16_t)uNewEsp;
4938 }
4939
4940 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4941
4942 /* Check that there is sufficient space for the stack frame. */
4943 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4944 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4945 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4946 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4947
4948 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4949 {
4950 if ( uNewEsp - 1 > cbLimitSS
4951 || uNewEsp < cbStackFrame)
4952 {
4953 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4954 u8Vector, NewSS, uNewEsp, cbStackFrame));
4955 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4956 }
4957 }
4958 else
4959 {
4960 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4961 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4962 {
4963 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4964 u8Vector, NewSS, uNewEsp, cbStackFrame));
4965 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4966 }
4967 }
4968
4969 /*
4970 * Start making changes.
4971 */
4972
4973 /* Set the new CPL so that stack accesses use it. */
4974 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4975 pVCpu->iem.s.uCpl = uNewCpl;
4976
4977 /* Create the stack frame. */
4978 RTPTRUNION uStackFrame;
4979 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4980 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4981 if (rcStrict != VINF_SUCCESS)
4982 return rcStrict;
4983 void * const pvStackFrame = uStackFrame.pv;
4984 if (f32BitGate)
4985 {
4986 if (fFlags & IEM_XCPT_FLAGS_ERR)
4987 *uStackFrame.pu32++ = uErr;
4988 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4989 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4990 uStackFrame.pu32[2] = fEfl;
4991 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4992 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4993 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4994 if (fEfl & X86_EFL_VM)
4995 {
4996 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4997 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4998 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4999 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5000 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5001 }
5002 }
5003 else
5004 {
5005 if (fFlags & IEM_XCPT_FLAGS_ERR)
5006 *uStackFrame.pu16++ = uErr;
5007 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5008 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5009 uStackFrame.pu16[2] = fEfl;
5010 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5011 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5012 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5013 if (fEfl & X86_EFL_VM)
5014 {
5015 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5016 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5017 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5018 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5019 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5020 }
5021 }
5022 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5023 if (rcStrict != VINF_SUCCESS)
5024 return rcStrict;
5025
5026 /* Mark the selectors 'accessed' (hope this is the correct time). */
5027 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5028 * after pushing the stack frame? (Write protect the gdt + stack to
5029 * find out.) */
5030 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5031 {
5032 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5033 if (rcStrict != VINF_SUCCESS)
5034 return rcStrict;
5035 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5036 }
5037
5038 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5039 {
5040 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5041 if (rcStrict != VINF_SUCCESS)
5042 return rcStrict;
5043 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5044 }
5045
5046 /*
5047 * Start committing the register changes (joins with the DPL=CPL branch).
5048 */
5049 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5050 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5051 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5052 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5053 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5054 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5055 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5056 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5057 * SP is loaded).
5058 * Need to check the other combinations too:
5059 * - 16-bit TSS, 32-bit handler
5060 * - 32-bit TSS, 16-bit handler */
5061 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5062 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5063 else
5064 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5065
5066 if (fEfl & X86_EFL_VM)
5067 {
5068 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5069 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5070 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5071 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5072 }
5073 }
5074 /*
5075 * Same privilege, no stack change and smaller stack frame.
5076 */
5077 else
5078 {
5079 uint64_t uNewRsp;
5080 RTPTRUNION uStackFrame;
5081 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5082 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5083 if (rcStrict != VINF_SUCCESS)
5084 return rcStrict;
5085 void * const pvStackFrame = uStackFrame.pv;
5086
5087 if (f32BitGate)
5088 {
5089 if (fFlags & IEM_XCPT_FLAGS_ERR)
5090 *uStackFrame.pu32++ = uErr;
5091 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5092 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5093 uStackFrame.pu32[2] = fEfl;
5094 }
5095 else
5096 {
5097 if (fFlags & IEM_XCPT_FLAGS_ERR)
5098 *uStackFrame.pu16++ = uErr;
5099 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5100 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5101 uStackFrame.pu16[2] = fEfl;
5102 }
5103 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5104 if (rcStrict != VINF_SUCCESS)
5105 return rcStrict;
5106
5107 /* Mark the CS selector as 'accessed'. */
5108 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5109 {
5110 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5111 if (rcStrict != VINF_SUCCESS)
5112 return rcStrict;
5113 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5114 }
5115
5116 /*
5117 * Start committing the register changes (joins with the other branch).
5118 */
5119 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5120 }
5121
5122 /* ... register committing continues. */
5123 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5124 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5125 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5126 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5127 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5128 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5129
5130 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5131 fEfl &= ~fEflToClear;
5132 IEMMISC_SET_EFL(pVCpu, fEfl);
5133
5134 if (fFlags & IEM_XCPT_FLAGS_CR2)
5135 pVCpu->cpum.GstCtx.cr2 = uCr2;
5136
5137 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5138 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5139
5140 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5141}
5142
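/* For orientation, a sketch of the stack frame built by the 32-bit gate,
 * privilege-changing path of iemRaiseXcptOrIntInProtMode above, expressed as a
 * hypothetical struct (lowest address first; the new ESP points at the first
 * member that is actually present):
 * @code
 * typedef struct XCPTFRAME32                // illustrative only, not a real IEM type
 * {
 *     uint32_t    uErrCd;                   // only when IEM_XCPT_FLAGS_ERR is set
 *     uint32_t    uRetEip;
 *     uint32_t    uRetCs;                   // RPL replaced by the old CPL
 *     uint32_t    fEfl;
 *     uint32_t    uOldEsp;
 *     uint32_t    uOldSs;
 *     uint32_t    auV86Segs[4];             // ES, DS, FS, GS; only when interrupting V8086 code
 * } XCPTFRAME32;
 * @endcode
 * The 16-bit gate variant is identical except that every member shrinks to 16 bits.
 */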
5143
5144/**
5145 * Implements exceptions and interrupts for long mode.
5146 *
5147 * @returns VBox strict status code.
5148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5149 * @param cbInstr The number of bytes to offset rIP by in the return
5150 * address.
5151 * @param u8Vector The interrupt / exception vector number.
5152 * @param fFlags The flags.
5153 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5154 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5155 */
5156IEM_STATIC VBOXSTRICTRC
5157iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5158 uint8_t cbInstr,
5159 uint8_t u8Vector,
5160 uint32_t fFlags,
5161 uint16_t uErr,
5162 uint64_t uCr2)
5163{
5164 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5165
5166 /*
5167 * Read the IDT entry.
5168 */
5169 uint16_t offIdt = (uint16_t)u8Vector << 4;
5170 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5171 {
5172 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5173 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5174 }
5175 X86DESC64 Idte;
5176 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5177 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5178 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5179 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5180 {
5181 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5182 return rcStrict;
5183 }
5184 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5185 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5186 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5187
5188 /*
5189 * Check the descriptor type, DPL and such.
5190 * ASSUMES this is done in the same order as described for call-gate calls.
5191 */
5192 if (Idte.Gate.u1DescType)
5193 {
5194 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5195 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5196 }
5197 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5198 switch (Idte.Gate.u4Type)
5199 {
5200 case AMD64_SEL_TYPE_SYS_INT_GATE:
5201 fEflToClear |= X86_EFL_IF;
5202 break;
5203 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5204 break;
5205
5206 default:
5207 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5208 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5209 }
5210
5211 /* Check DPL against CPL if applicable. */
5212 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5213 {
5214 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5215 {
5216 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5217 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5218 }
5219 }
5220
5221 /* Is it there? */
5222 if (!Idte.Gate.u1Present)
5223 {
5224 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5225 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5226 }
5227
5228 /* A null CS is bad. */
5229 RTSEL NewCS = Idte.Gate.u16Sel;
5230 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5231 {
5232 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5233 return iemRaiseGeneralProtectionFault0(pVCpu);
5234 }
5235
5236 /* Fetch the descriptor for the new CS. */
5237 IEMSELDESC DescCS;
5238 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5239 if (rcStrict != VINF_SUCCESS)
5240 {
5241 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5242 return rcStrict;
5243 }
5244
5245 /* Must be a 64-bit code segment. */
5246 if (!DescCS.Long.Gen.u1DescType)
5247 {
5248 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5249 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5250 }
5251 if ( !DescCS.Long.Gen.u1Long
5252 || DescCS.Long.Gen.u1DefBig
5253 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5254 {
5255 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5256 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5257 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5258 }
5259
5260 /* Don't allow lowering the privilege level. For non-conforming CS
5261 selectors, the CS.DPL sets the privilege level the trap/interrupt
5262 handler runs at. For conforming CS selectors, the CPL remains
5263 unchanged, but the CS.DPL must be <= CPL. */
5264 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5265 * when CPU in Ring-0. Result \#GP? */
5266 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5267 {
5268 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5269 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5270 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5271 }
5272
5273
5274 /* Make sure the selector is present. */
5275 if (!DescCS.Legacy.Gen.u1Present)
5276 {
5277 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5278 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5279 }
5280
5281 /* Check that the new RIP is canonical. */
5282 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5283 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5284 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5285 if (!IEM_IS_CANONICAL(uNewRip))
5286 {
5287 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5288 return iemRaiseGeneralProtectionFault0(pVCpu);
5289 }
5290
5291 /*
5292 * If the privilege level changes or if the IST isn't zero, we need to get
5293 * a new stack from the TSS.
5294 */
5295 uint64_t uNewRsp;
5296 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5297 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5298 if ( uNewCpl != pVCpu->iem.s.uCpl
5299 || Idte.Gate.u3IST != 0)
5300 {
5301 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5302 if (rcStrict != VINF_SUCCESS)
5303 return rcStrict;
5304 }
5305 else
5306 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5307 uNewRsp &= ~(uint64_t)0xf;
5308
5309 /*
5310 * Calc the flag image to push.
5311 */
5312 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5313 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5314 fEfl &= ~X86_EFL_RF;
5315 else
5316 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5317
5318 /*
5319 * Start making changes.
5320 */
5321 /* Set the new CPL so that stack accesses use it. */
5322 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5323 pVCpu->iem.s.uCpl = uNewCpl;
5324
5325 /* Create the stack frame. */
5326 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5327 RTPTRUNION uStackFrame;
5328 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5329 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5330 if (rcStrict != VINF_SUCCESS)
5331 return rcStrict;
5332 void * const pvStackFrame = uStackFrame.pv;
5333
5334 if (fFlags & IEM_XCPT_FLAGS_ERR)
5335 *uStackFrame.pu64++ = uErr;
5336 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5337 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5338 uStackFrame.pu64[2] = fEfl;
5339 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5340 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5341 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5342 if (rcStrict != VINF_SUCCESS)
5343 return rcStrict;
5344
5345 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5346 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5347 * after pushing the stack frame? (Write protect the gdt + stack to
5348 * find out.) */
5349 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5350 {
5351 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5352 if (rcStrict != VINF_SUCCESS)
5353 return rcStrict;
5354 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5355 }
5356
5357 /*
5358 * Start committing the register changes.
5359 */
5360 /** @todo research/testcase: Figure out what VT-x and AMD-V load into the
5361 * hidden registers when interrupting 32-bit or 16-bit code! */
5362 if (uNewCpl != uOldCpl)
5363 {
5364 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5365 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5366 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5367 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5368 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5369 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5370 }
5371 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5372 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5373 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5374 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5375 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5376 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5377 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5378 pVCpu->cpum.GstCtx.rip = uNewRip;
5379
5380 fEfl &= ~fEflToClear;
5381 IEMMISC_SET_EFL(pVCpu, fEfl);
5382
5383 if (fFlags & IEM_XCPT_FLAGS_CR2)
5384 pVCpu->cpum.GstCtx.cr2 = uCr2;
5385
5386 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5387 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5388
5389 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5390}
5391
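/* The corresponding sketch for the long mode path above: the frame always consists
 * of 64-bit quantities and is placed on a stack (from the TSS/IST or the current one)
 * whose top has first been aligned down to 16 bytes:
 * @code
 * typedef struct XCPTFRAME64                // illustrative only, not a real IEM type
 * {
 *     uint64_t    uErrCd;                   // only when IEM_XCPT_FLAGS_ERR is set
 *     uint64_t    uRetRip;
 *     uint64_t    uRetCs;                   // RPL replaced by the old CPL
 *     uint64_t    fRFlags;
 *     uint64_t    uOldRsp;
 *     uint64_t    uOldSs;
 * } XCPTFRAME64;
 * @endcode
 */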
5392
5393/**
5394 * Implements exceptions and interrupts.
5395 *
5396 * All exceptions and interrupts go through this function!
5397 *
5398 * @returns VBox strict status code.
5399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5400 * @param cbInstr The number of bytes to offset rIP by in the return
5401 * address.
5402 * @param u8Vector The interrupt / exception vector number.
5403 * @param fFlags The flags.
5404 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5405 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5406 */
5407DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5408iemRaiseXcptOrInt(PVMCPU pVCpu,
5409 uint8_t cbInstr,
5410 uint8_t u8Vector,
5411 uint32_t fFlags,
5412 uint16_t uErr,
5413 uint64_t uCr2)
5414{
5415 /*
5416 * Get all the state that we might need here.
5417 */
5418 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5419 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5420
5421#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5422 /*
5423 * Flush prefetch buffer
5424 */
5425 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5426#endif
5427
5428 /*
5429 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5430 */
5431 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5432 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5433 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5434 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5435 {
5436 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5437 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5438 u8Vector = X86_XCPT_GP;
5439 uErr = 0;
5440 }
5441#ifdef DBGFTRACE_ENABLED
5442 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5443 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5444 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5445#endif
5446
5447#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5448 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5449 {
5450 /*
5451 * If the event is being injected as part of VMRUN, it isn't subject to event
5452 * intercepts in the nested-guest. However, secondary exceptions that occur
5453 * during injection of any event -are- subject to exception intercepts.
5454 * See AMD spec. 15.20 "Event Injection".
5455 */
5456 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5457 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = 1;
5458 else
5459 {
5460 /*
5461 * Check and handle if the event being raised is intercepted.
5462 */
5463 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5464 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5465 return rcStrict0;
5466 }
5467 }
5468#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5469
5470 /*
5471 * Do recursion accounting.
5472 */
5473 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5474 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5475 if (pVCpu->iem.s.cXcptRecursions == 0)
5476 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5477 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5478 else
5479 {
5480 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5481 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5482 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5483
5484 if (pVCpu->iem.s.cXcptRecursions >= 4)
5485 {
5486#ifdef DEBUG_bird
5487 AssertFailed();
5488#endif
5489 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5490 }
5491
5492 /*
5493 * Evaluate the sequence of recurring events.
5494 */
5495 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5496 NULL /* pXcptRaiseInfo */);
5497 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5498 { /* likely */ }
5499 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5500 {
5501 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5502 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5503 u8Vector = X86_XCPT_DF;
5504 uErr = 0;
5505 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5506 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5507 IEM_RETURN_SVM_VMEXIT(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5508 }
5509 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5510 {
5511 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5512 return iemInitiateCpuShutdown(pVCpu);
5513 }
5514 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5515 {
5516 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5517 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5518 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5519 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5520 return VERR_EM_GUEST_CPU_HANG;
5521 }
5522 else
5523 {
5524 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5525 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5526 return VERR_IEM_IPE_9;
5527 }
5528
5529 /*
5530 * The 'EXT' bit is set when an exception occurs during delivery of an external
5531 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5532 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5533 * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
5534 *
5535 * [1] - Intel spec. 6.13 "Error Code"
5536 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5537 * [3] - Intel Instruction reference for INT n.
5538 */
5539 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5540 && (fFlags & IEM_XCPT_FLAGS_ERR)
5541 && u8Vector != X86_XCPT_PF
5542 && u8Vector != X86_XCPT_DF)
5543 {
5544 uErr |= X86_TRAP_ERR_EXTERNAL;
5545 }
5546 }
5547
5548 pVCpu->iem.s.cXcptRecursions++;
5549 pVCpu->iem.s.uCurXcpt = u8Vector;
5550 pVCpu->iem.s.fCurXcpt = fFlags;
5551 pVCpu->iem.s.uCurXcptErr = uErr;
5552 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5553
5554 /*
5555 * Extensive logging.
5556 */
5557#if defined(LOG_ENABLED) && defined(IN_RING3)
5558 if (LogIs3Enabled())
5559 {
5560 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5561 PVM pVM = pVCpu->CTX_SUFF(pVM);
5562 char szRegs[4096];
5563 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5564 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5565 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5566 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5567 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5568 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5569 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5570 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5571 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5572 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5573 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5574 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5575 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5576 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5577 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5578 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5579 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5580 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5581 " efer=%016VR{efer}\n"
5582 " pat=%016VR{pat}\n"
5583 " sf_mask=%016VR{sf_mask}\n"
5584 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5585 " lstar=%016VR{lstar}\n"
5586 " star=%016VR{star} cstar=%016VR{cstar}\n"
5587 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5588 );
5589
5590 char szInstr[256];
5591 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5592 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5593 szInstr, sizeof(szInstr), NULL);
5594 Log3(("%s%s\n", szRegs, szInstr));
5595 }
5596#endif /* LOG_ENABLED */
5597
5598 /*
5599 * Call the mode specific worker function.
5600 */
5601 VBOXSTRICTRC rcStrict;
5602 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5603 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5604 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5605 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5606 else
5607 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5608
5609 /* Flush the prefetch buffer. */
5610#ifdef IEM_WITH_CODE_TLB
5611 pVCpu->iem.s.pbInstrBuf = NULL;
5612#else
5613 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5614#endif
5615
5616 /*
5617 * Unwind.
5618 */
5619 pVCpu->iem.s.cXcptRecursions--;
5620 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5621 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5622 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5623 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5624 pVCpu->iem.s.cXcptRecursions + 1));
5625 return rcStrict;
5626}
5627
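/* For context, the classic recursion rules that IEMEvaluateRecursiveXcpt encodes
 * (roughly, per the Intel SDM double fault class table; shown here only as a memory
 * aid, the authoritative logic lives in that function):
 * @code
 *     #GP raised while delivering a #NP    -> #DF with error code 0
 *     #PF raised while delivering a #PF    -> #DF with error code 0
 *     any fault while delivering the #DF   -> triple fault, i.e. CPU shutdown
 * @endcode
 */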
5628#ifdef IEM_WITH_SETJMP
5629/**
5630 * See iemRaiseXcptOrInt. Will not return.
5631 */
5632IEM_STATIC DECL_NO_RETURN(void)
5633iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5634 uint8_t cbInstr,
5635 uint8_t u8Vector,
5636 uint32_t fFlags,
5637 uint16_t uErr,
5638 uint64_t uCr2)
5639{
5640 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5641 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5642}
5643#endif
5644
5645
5646/** \#DE - 00. */
5647DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5648{
5649 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5650}
5651
5652
5653/** \#DB - 01.
5654 * @note This automatically clears DR7.GD. */
5655DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5656{
5657 /** @todo set/clear RF. */
5658 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5659 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5660}
5661
5662
5663/** \#BR - 05. */
5664DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5665{
5666 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5667}
5668
5669
5670/** \#UD - 06. */
5671DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5672{
5673 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5674}
5675
5676
5677/** \#NM - 07. */
5678DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5679{
5680 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5681}
5682
5683
5684/** \#TS(err) - 0a. */
5685DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5686{
5687 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5688}
5689
5690
5691/** \#TS(tr) - 0a. */
5692DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5693{
5694 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5695 pVCpu->cpum.GstCtx.tr.Sel, 0);
5696}
5697
5698
5699/** \#TS(0) - 0a. */
5700DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5701{
5702 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5703 0, 0);
5704}
5705
5706
5707 /** \#TS(sel) - 0a. */
5708DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5709{
5710 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5711 uSel & X86_SEL_MASK_OFF_RPL, 0);
5712}
5713
5714
5715/** \#NP(err) - 0b. */
5716DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5717{
5718 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5719}
5720
5721
5722/** \#NP(sel) - 0b. */
5723DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5724{
5725 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5726 uSel & ~X86_SEL_RPL, 0);
5727}
5728
5729
5730/** \#SS(seg) - 0c. */
5731DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5732{
5733 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5734 uSel & ~X86_SEL_RPL, 0);
5735}
5736
5737
5738/** \#SS(err) - 0c. */
5739DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5740{
5741 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5742}
5743
5744
5745/** \#GP(n) - 0d. */
5746DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5747{
5748 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5749}
5750
5751
5752/** \#GP(0) - 0d. */
5753DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5754{
5755 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5756}
5757
5758#ifdef IEM_WITH_SETJMP
5759/** \#GP(0) - 0d. */
5760DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5761{
5762 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5763}
5764#endif
5765
5766
5767/** \#GP(sel) - 0d. */
5768DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5769{
5770 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5771 Sel & ~X86_SEL_RPL, 0);
5772}
5773
5774
5775/** \#GP(0) - 0d. */
5776DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5777{
5778 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5779}
5780
5781
5782/** \#GP(sel) - 0d. */
5783DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5784{
5785 NOREF(iSegReg); NOREF(fAccess);
5786 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5787 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5788}
5789
5790#ifdef IEM_WITH_SETJMP
5791/** \#GP(sel) - 0d, longjmp. */
5792DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5793{
5794 NOREF(iSegReg); NOREF(fAccess);
5795 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5796 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5797}
5798#endif
5799
5800/** \#GP(sel) - 0d. */
5801DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5802{
5803 NOREF(Sel);
5804 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5805}
5806
5807#ifdef IEM_WITH_SETJMP
5808/** \#GP(sel) - 0d, longjmp. */
5809DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5810{
5811 NOREF(Sel);
5812 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5813}
5814#endif
5815
5816
5817/** \#GP(sel) - 0d. */
5818DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5819{
5820 NOREF(iSegReg); NOREF(fAccess);
5821 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5822}
5823
5824#ifdef IEM_WITH_SETJMP
5825/** \#GP(sel) - 0d, longjmp. */
5826DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5827 uint32_t fAccess)
5828{
5829 NOREF(iSegReg); NOREF(fAccess);
5830 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5831}
5832#endif
5833
5834
5835/** \#PF(n) - 0e. */
5836DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5837{
5838 uint16_t uErr;
5839 switch (rc)
5840 {
5841 case VERR_PAGE_NOT_PRESENT:
5842 case VERR_PAGE_TABLE_NOT_PRESENT:
5843 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5844 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5845 uErr = 0;
5846 break;
5847
5848 default:
5849 AssertMsgFailed(("%Rrc\n", rc));
5850 RT_FALL_THRU();
5851 case VERR_ACCESS_DENIED:
5852 uErr = X86_TRAP_PF_P;
5853 break;
5854
5855 /** @todo reserved */
5856 }
5857
5858 if (pVCpu->iem.s.uCpl == 3)
5859 uErr |= X86_TRAP_PF_US;
5860
5861 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5862 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5863 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5864 uErr |= X86_TRAP_PF_ID;
5865
5866#if 0 /* This is so much non-sense, really. Why was it done like that? */
5867 /* Note! RW access callers reporting a WRITE protection fault, will clear
5868 the READ flag before calling. So, read-modify-write accesses (RW)
5869 can safely be reported as READ faults. */
5870 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5871 uErr |= X86_TRAP_PF_RW;
5872#else
5873 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5874 {
5875 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5876 uErr |= X86_TRAP_PF_RW;
5877 }
5878#endif
5879
5880 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5881 uErr, GCPtrWhere);
5882}
5883
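/* Quick reference for the page fault error code bits assembled above (matching the
 * Intel SDM page-fault error code description):
 * @code
 *     X86_TRAP_PF_P    - set for protection violations (page was present)
 *     X86_TRAP_PF_RW   - set for write accesses
 *     X86_TRAP_PF_US   - set when the access originated at CPL 3
 *     X86_TRAP_PF_ID   - set for instruction fetches (here: PAE/long mode with EFER.NXE)
 * @endcode
 */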
5884#ifdef IEM_WITH_SETJMP
5885/** \#PF(n) - 0e, longjmp. */
5886IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5887{
5888 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5889}
5890#endif
5891
5892
5893/** \#MF(0) - 10. */
5894DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5895{
5896 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5897}
5898
5899
5900/** \#AC(0) - 11. */
5901DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5902{
5903 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5904}
5905
5906
5907/**
5908 * Macro for calling iemCImplRaiseDivideError().
5909 *
5910 * This enables us to add/remove arguments and force different levels of
5911 * inlining as we wish.
5912 *
5913 * @return Strict VBox status code.
5914 */
5915#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5916IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5917{
5918 NOREF(cbInstr);
5919 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5920}
5921
5922
5923/**
5924 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5925 *
5926 * This enables us to add/remove arguments and force different levels of
5927 * inlining as we wish.
5928 *
5929 * @return Strict VBox status code.
5930 */
5931#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5932IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5933{
5934 NOREF(cbInstr);
5935 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5936}
5937
5938
5939/**
5940 * Macro for calling iemCImplRaiseInvalidOpcode().
5941 *
5942 * This enables us to add/remove arguments and force different levels of
5943 * inlining as we wish.
5944 *
5945 * @return Strict VBox status code.
5946 */
5947#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5948IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5949{
5950 NOREF(cbInstr);
5951 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5952}
5953
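/* These IEMOP_RAISE_* macros are intended for use inside the opcode decoder
 * functions; a typical (illustrative) call site, mirroring what the FNIEMOP_UD_STUB
 * macros further down expand to, would look like this (the opcode function name is
 * made up for the example):
 * @code
 * FNIEMOP_DEF(iemOp_SomeUnsupportedOpcode)
 * {
 *     return IEMOP_RAISE_INVALID_OPCODE();
 * }
 * @endcode
 */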
5954
5955/** @} */
5956
5957
5958/*
5959 *
5960 * Helper routines.
5961 * Helper routines.
5962 * Helper routines.
5963 *
5964 */
5965
5966/**
5967 * Recalculates the effective operand size.
5968 *
5969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5970 */
5971IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5972{
5973 switch (pVCpu->iem.s.enmCpuMode)
5974 {
5975 case IEMMODE_16BIT:
5976 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5977 break;
5978 case IEMMODE_32BIT:
5979 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5980 break;
5981 case IEMMODE_64BIT:
5982 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5983 {
5984 case 0:
5985 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5986 break;
5987 case IEM_OP_PRF_SIZE_OP:
5988 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5989 break;
5990 case IEM_OP_PRF_SIZE_REX_W:
5991 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5992 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5993 break;
5994 }
5995 break;
5996 default:
5997 AssertFailed();
5998 }
5999}
6000
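/* A worked example of the 64-bit mode rules implemented above (see also
 * iemRecalEffOpSize64Default below): REX.W always wins over a 66h operand-size
 * prefix, and without either prefix the default size applies:
 * @code
 *     no size prefix   -> enmDefOpSize (32-bit, or 64-bit for instructions defaulting to 64-bit)
 *     66h              -> 16-bit
 *     REX.W            -> 64-bit
 *     REX.W + 66h      -> 64-bit
 * @endcode
 */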
6001
6002/**
6003 * Sets the default operand size to 64-bit and recalculates the effective
6004 * operand size.
6005 *
6006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6007 */
6008IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6009{
6010 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6011 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6012 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6013 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6014 else
6015 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6016}
6017
6018
6019/*
6020 *
6021 * Common opcode decoders.
6022 * Common opcode decoders.
6023 * Common opcode decoders.
6024 *
6025 */
6026//#include <iprt/mem.h>
6027
6028/**
6029 * Used to add extra details about a stub case.
6030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6031 */
6032IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6033{
6034#if defined(LOG_ENABLED) && defined(IN_RING3)
6035 PVM pVM = pVCpu->CTX_SUFF(pVM);
6036 char szRegs[4096];
6037 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6038 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6039 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6040 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6041 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6042 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6043 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6044 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6045 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6046 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6047 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6048 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6049 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6050 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6051 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6052 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6053 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6054 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6055 " efer=%016VR{efer}\n"
6056 " pat=%016VR{pat}\n"
6057 " sf_mask=%016VR{sf_mask}\n"
6058 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6059 " lstar=%016VR{lstar}\n"
6060 " star=%016VR{star} cstar=%016VR{cstar}\n"
6061 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6062 );
6063
6064 char szInstr[256];
6065 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6066 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6067 szInstr, sizeof(szInstr), NULL);
6068
6069 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6070#else
6071 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
6072#endif
6073}
6074
6075/**
6076 * Complains about a stub.
6077 *
6078 * Two versions of this macro are provided: one for daily use and one for use
6079 * when working on IEM.
6080 */
6081#if 0
6082# define IEMOP_BITCH_ABOUT_STUB() \
6083 do { \
6084 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6085 iemOpStubMsg2(pVCpu); \
6086 RTAssertPanic(); \
6087 } while (0)
6088#else
6089# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6090#endif
6091
6092/** Stubs an opcode. */
6093#define FNIEMOP_STUB(a_Name) \
6094 FNIEMOP_DEF(a_Name) \
6095 { \
6096 RT_NOREF_PV(pVCpu); \
6097 IEMOP_BITCH_ABOUT_STUB(); \
6098 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6099 } \
6100 typedef int ignore_semicolon
6101
6102/** Stubs an opcode. */
6103#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6104 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6105 { \
6106 RT_NOREF_PV(pVCpu); \
6107 RT_NOREF_PV(a_Name0); \
6108 IEMOP_BITCH_ABOUT_STUB(); \
6109 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6110 } \
6111 typedef int ignore_semicolon
6112
6113/** Stubs an opcode which currently should raise \#UD. */
6114#define FNIEMOP_UD_STUB(a_Name) \
6115 FNIEMOP_DEF(a_Name) \
6116 { \
6117 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6118 return IEMOP_RAISE_INVALID_OPCODE(); \
6119 } \
6120 typedef int ignore_semicolon
6121
6122/** Stubs an opcode which currently should raise \#UD. */
6123#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6124 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6125 { \
6126 RT_NOREF_PV(pVCpu); \
6127 RT_NOREF_PV(a_Name0); \
6128 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6129 return IEMOP_RAISE_INVALID_OPCODE(); \
6130 } \
6131 typedef int ignore_semicolon
6132
6133
6134
6135/** @name Register Access.
6136 * @{
6137 */
6138
6139/**
6140 * Gets a reference (pointer) to the specified hidden segment register.
6141 *
6142 * @returns Hidden register reference.
6143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6144 * @param iSegReg The segment register.
6145 */
6146IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6147{
6148 Assert(iSegReg < X86_SREG_COUNT);
6149 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6150 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6151
6152#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6153 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6154 { /* likely */ }
6155 else
6156 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6157#else
6158 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6159#endif
6160 return pSReg;
6161}
6162
6163
6164/**
6165 * Ensures that the given hidden segment register is up to date.
6166 *
6167 * @returns Hidden register reference.
6168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6169 * @param pSReg The segment register.
6170 */
6171IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6172{
6173#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6174 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6175 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6176#else
6177 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6178 NOREF(pVCpu);
6179#endif
6180 return pSReg;
6181}
6182
6183
6184/**
6185 * Gets a reference (pointer) to the specified segment register (the selector
6186 * value).
6187 *
6188 * @returns Pointer to the selector variable.
6189 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6190 * @param iSegReg The segment register.
6191 */
6192DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6193{
6194 Assert(iSegReg < X86_SREG_COUNT);
6195 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6196 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6197}
6198
6199
6200/**
6201 * Fetches the selector value of a segment register.
6202 *
6203 * @returns The selector value.
6204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6205 * @param iSegReg The segment register.
6206 */
6207DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6208{
6209 Assert(iSegReg < X86_SREG_COUNT);
6210 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6211 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6212}
6213
6214
6215/**
6216 * Fetches the base address value of a segment register.
6217 *
6218 * @returns The segment base address value.
6219 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6220 * @param iSegReg The segment register.
6221 */
6222DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6223{
6224 Assert(iSegReg < X86_SREG_COUNT);
6225 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6226 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6227}
6228
6229
6230/**
6231 * Gets a reference (pointer) to the specified general purpose register.
6232 *
6233 * @returns Register reference.
6234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6235 * @param iReg The general purpose register.
6236 */
6237DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6238{
6239 Assert(iReg < 16);
6240 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6241}
6242
6243
6244/**
6245 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6246 *
6247 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6248 *
6249 * @returns Register reference.
6250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6251 * @param iReg The register.
6252 */
6253DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6254{
6255 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6256 {
6257 Assert(iReg < 16);
6258 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6259 }
6260 /* high 8-bit register. */
6261 Assert(iReg < 8);
6262 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6263}
6264
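/*
 * Illustrative sketch (example only, not compiled): the register index mapping
 * performed by iemGRegRefU8.  Without any REX prefix, indices 4..7 select the
 * legacy high byte registers AH/CH/DH/BH (byte 1 of rAX..rBX); with a REX
 * prefix they select SPL/BPL/SIL/DIL instead.  The names returned here are
 * purely for illustration; the real function returns pointers into CPUMCTX.
 */
#if 0
static const char *iemExampleGReg8Name(unsigned iReg, int fHasRexPrefix)
{
    static const char * const s_apszLow[16] =
    {
        "al",  "cl",  "dl",   "bl",   "spl",  "bpl",  "sil",  "dil",
        "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b"
    };
    static const char * const s_apszHigh[4] = { "ah", "ch", "dh", "bh" };
    if (iReg < 4 || fHasRexPrefix)
        return s_apszLow[iReg & 15];    /* low byte of the GPR */
    return s_apszHigh[iReg & 3];        /* byte 1 of rAX..rBX  */
}
#endif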
6265
6266/**
6267 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6268 *
6269 * @returns Register reference.
6270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6271 * @param iReg The register.
6272 */
6273DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6274{
6275 Assert(iReg < 16);
6276 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6277}
6278
6279
6280/**
6281 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6282 *
6283 * @returns Register reference.
6284 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6285 * @param iReg The register.
6286 */
6287DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6288{
6289 Assert(iReg < 16);
6290 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6291}
6292
6293
6294/**
6295 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6296 *
6297 * @returns Register reference.
6298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6299 * @param iReg The register.
6300 */
6301DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6302{
6303 Assert(iReg < 16);
6304 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6305}
6306
6307
6308/**
6309 * Gets a reference (pointer) to the specified segment register's base address.
6310 *
6311 * @returns Segment register base address reference.
6312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6313 * @param iSegReg The segment selector.
6314 */
6315DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6316{
6317 Assert(iSegReg < X86_SREG_COUNT);
6318 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6319 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6320}
6321
6322
6323/**
6324 * Fetches the value of an 8-bit general purpose register.
6325 *
6326 * @returns The register value.
6327 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6328 * @param iReg The register.
6329 */
6330DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6331{
6332 return *iemGRegRefU8(pVCpu, iReg);
6333}
6334
6335
6336/**
6337 * Fetches the value of a 16-bit general purpose register.
6338 *
6339 * @returns The register value.
6340 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6341 * @param iReg The register.
6342 */
6343DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6344{
6345 Assert(iReg < 16);
6346 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6347}
6348
6349
6350/**
6351 * Fetches the value of a 32-bit general purpose register.
6352 *
6353 * @returns The register value.
6354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6355 * @param iReg The register.
6356 */
6357DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6358{
6359 Assert(iReg < 16);
6360 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6361}
6362
6363
6364/**
6365 * Fetches the value of a 64-bit general purpose register.
6366 *
6367 * @returns The register value.
6368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6369 * @param iReg The register.
6370 */
6371DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6372{
6373 Assert(iReg < 16);
6374 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6375}
6376
6377
6378/**
6379 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6380 *
6381 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6382 * segment limit.
6383 *
6384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6385 * @param offNextInstr The offset of the next instruction.
6386 */
6387IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6388{
6389 switch (pVCpu->iem.s.enmEffOpSize)
6390 {
6391 case IEMMODE_16BIT:
6392 {
6393 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6394 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6395 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6396 return iemRaiseGeneralProtectionFault0(pVCpu);
6397 pVCpu->cpum.GstCtx.rip = uNewIp;
6398 break;
6399 }
6400
6401 case IEMMODE_32BIT:
6402 {
6403 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6404 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6405
6406 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6407 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6408 return iemRaiseGeneralProtectionFault0(pVCpu);
6409 pVCpu->cpum.GstCtx.rip = uNewEip;
6410 break;
6411 }
6412
6413 case IEMMODE_64BIT:
6414 {
6415 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6416
6417 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6418 if (!IEM_IS_CANONICAL(uNewRip))
6419 return iemRaiseGeneralProtectionFault0(pVCpu);
6420 pVCpu->cpum.GstCtx.rip = uNewRip;
6421 break;
6422 }
6423
6424 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6425 }
6426
6427 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6428
6429#ifndef IEM_WITH_CODE_TLB
6430 /* Flush the prefetch buffer. */
6431 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6432#endif
6433
6434 return VINF_SUCCESS;
6435}
6436
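/*
 * Illustrative sketch (example only, not compiled): the canonical-address test
 * that IEM_IS_CANONICAL performs on 64-bit branch targets, written out for the
 * common 48-bit virtual address width.  Bits 63:47 must all equal bit 47,
 * which the sign-extend-and-compare below checks; the helper name is made up.
 */
#if 0
# include <stdint.h>
# include <assert.h>
static int iemExampleIsCanonical48(uint64_t uAddr)
{
    return ((int64_t)(uAddr << 16) >> 16) == (int64_t)uAddr;
}
static void iemExampleIsCanonicalSelfTest(void)
{
    assert( iemExampleIsCanonical48(UINT64_C(0x00007fffffffffff))); /* top of the lower half    */
    assert(!iemExampleIsCanonical48(UINT64_C(0x0000800000000000))); /* first non-canonical addr */
    assert( iemExampleIsCanonical48(UINT64_C(0xffff800000000000))); /* bottom of the upper half */
}
#endif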
6437
6438/**
6439 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6440 *
6441 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6442 * segment limit.
6443 *
6444 * @returns Strict VBox status code.
6445 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6446 * @param offNextInstr The offset of the next instruction.
6447 */
6448IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6449{
6450 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6451
6452 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6453 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6454 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6455 return iemRaiseGeneralProtectionFault0(pVCpu);
6456 /** @todo Test 16-bit jump in 64-bit mode. Possible? */
6457 pVCpu->cpum.GstCtx.rip = uNewIp;
6458 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6459
6460#ifndef IEM_WITH_CODE_TLB
6461 /* Flush the prefetch buffer. */
6462 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6463#endif
6464
6465 return VINF_SUCCESS;
6466}
6467
6468
6469/**
6470 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6471 *
6472 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6473 * segment limit.
6474 *
6475 * @returns Strict VBox status code.
6476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6477 * @param offNextInstr The offset of the next instruction.
6478 */
6479IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6480{
6481 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6482
6483 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6484 {
6485 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6486
6487 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6488 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6489 return iemRaiseGeneralProtectionFault0(pVCpu);
6490 pVCpu->cpum.GstCtx.rip = uNewEip;
6491 }
6492 else
6493 {
6494 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6495
6496 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6497 if (!IEM_IS_CANONICAL(uNewRip))
6498 return iemRaiseGeneralProtectionFault0(pVCpu);
6499 pVCpu->cpum.GstCtx.rip = uNewRip;
6500 }
6501 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6502
6503#ifndef IEM_WITH_CODE_TLB
6504 /* Flush the prefetch buffer. */
6505 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6506#endif
6507
6508 return VINF_SUCCESS;
6509}
6510
6511
6512/**
6513 * Performs a near jump to the specified address.
6514 *
6515 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6516 * segment limit.
6517 *
6518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6519 * @param uNewRip The new RIP value.
6520 */
6521IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6522{
6523 switch (pVCpu->iem.s.enmEffOpSize)
6524 {
6525 case IEMMODE_16BIT:
6526 {
6527 Assert(uNewRip <= UINT16_MAX);
6528 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6529 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6530 return iemRaiseGeneralProtectionFault0(pVCpu);
6531 /** @todo Test 16-bit jump in 64-bit mode. */
6532 pVCpu->cpum.GstCtx.rip = uNewRip;
6533 break;
6534 }
6535
6536 case IEMMODE_32BIT:
6537 {
6538 Assert(uNewRip <= UINT32_MAX);
6539 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6540 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6541
6542 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6543 return iemRaiseGeneralProtectionFault0(pVCpu);
6544 pVCpu->cpum.GstCtx.rip = uNewRip;
6545 break;
6546 }
6547
6548 case IEMMODE_64BIT:
6549 {
6550 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6551
6552 if (!IEM_IS_CANONICAL(uNewRip))
6553 return iemRaiseGeneralProtectionFault0(pVCpu);
6554 pVCpu->cpum.GstCtx.rip = uNewRip;
6555 break;
6556 }
6557
6558 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6559 }
6560
6561 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6562
6563#ifndef IEM_WITH_CODE_TLB
6564 /* Flush the prefetch buffer. */
6565 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6566#endif
6567
6568 return VINF_SUCCESS;
6569}
6570
6571
6572/**
6573 * Get the address of the top of the stack.
6574 *
6575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6576 */
6577DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6578{
6579 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6580 return pVCpu->cpum.GstCtx.rsp;
6581 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6582 return pVCpu->cpum.GstCtx.esp;
6583 return pVCpu->cpum.GstCtx.sp;
6584}
6585
6586
6587/**
6588 * Updates the RIP/EIP/IP to point to the next instruction.
6589 *
6590 * This function leaves the EFLAGS.RF flag alone.
6591 *
6592 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6593 * @param cbInstr The number of bytes to add.
6594 */
6595IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6596{
6597 switch (pVCpu->iem.s.enmCpuMode)
6598 {
6599 case IEMMODE_16BIT:
6600 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6601 pVCpu->cpum.GstCtx.eip += cbInstr;
6602 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6603 break;
6604
6605 case IEMMODE_32BIT:
6606 pVCpu->cpum.GstCtx.eip += cbInstr;
6607 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6608 break;
6609
6610 case IEMMODE_64BIT:
6611 pVCpu->cpum.GstCtx.rip += cbInstr;
6612 break;
6613 default: AssertFailed();
6614 }
6615}
6616
6617
6618#if 0
6619/**
6620 * Updates the RIP/EIP/IP to point to the next instruction.
6621 *
6622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6623 */
6624IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6625{
6626 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6627}
6628#endif
6629
6630
6631
6632/**
6633 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6634 *
6635 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6636 * @param cbInstr The number of bytes to add.
6637 */
6638IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6639{
6640 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6641
6642 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6643#if ARCH_BITS >= 64
6644 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6645 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6646 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6647#else
6648 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6649 pVCpu->cpum.GstCtx.rip += cbInstr;
6650 else
6651 pVCpu->cpum.GstCtx.eip += cbInstr;
6652#endif
6653}
6654
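/*
 * Illustrative sketch (example only, not compiled): the branch-free RIP
 * advance used above on 64-bit hosts.  A single add plus a per-mode mask
 * replaces a mode switch; 16-bit and 32-bit code share the 32-bit mask (the
 * 16-bit case relies on the assertion that RIP already fits in 16 bits), and
 * 64-bit code keeps the full value.  Names below are made up.
 */
#if 0
# include <stdint.h>
# include <assert.h>
static uint64_t iemExampleAdvanceRip(uint64_t uRip, uint8_t cbInstr, unsigned enmMode /* 0=16, 1=32, 2=64 bit */)
{
    static uint64_t const s_auMasks[3] = { UINT32_MAX, UINT32_MAX, UINT64_MAX };
    return (uRip + cbInstr) & s_auMasks[enmMode];
}
static void iemExampleAdvanceRipSelfTest(void)
{
    assert(iemExampleAdvanceRip(UINT64_C(0xfffffffe), 4, 1) == 2);                     /* wraps at 4 GiB    */
    assert(iemExampleAdvanceRip(UINT64_C(0xfffffffe), 4, 2) == UINT64_C(0x100000002)); /* no wrap in 64-bit */
}
#endif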
6655
6656/**
6657 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6658 *
6659 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6660 */
6661IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6662{
6663 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6664}
6665
6666
6667/**
6668 * Adds to the stack pointer.
6669 *
6670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6671 * @param cbToAdd The number of bytes to add (8-bit!).
6672 */
6673DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6674{
6675 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6676 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6677 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6678 pVCpu->cpum.GstCtx.esp += cbToAdd;
6679 else
6680 pVCpu->cpum.GstCtx.sp += cbToAdd;
6681}
6682
6683
6684/**
6685 * Subtracts from the stack pointer.
6686 *
6687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6688 * @param cbToSub The number of bytes to subtract (8-bit!).
6689 */
6690DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6691{
6692 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6693 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6694 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6695 pVCpu->cpum.GstCtx.esp -= cbToSub;
6696 else
6697 pVCpu->cpum.GstCtx.sp -= cbToSub;
6698}
6699
6700
6701/**
6702 * Adds to the temporary stack pointer.
6703 *
6704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6705 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6706 * @param cbToAdd The number of bytes to add (16-bit).
6707 */
6708DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6709{
6710 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6711 pTmpRsp->u += cbToAdd;
6712 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6713 pTmpRsp->DWords.dw0 += cbToAdd;
6714 else
6715 pTmpRsp->Words.w0 += cbToAdd;
6716}
6717
6718
6719/**
6720 * Subtracts from the temporary stack pointer.
6721 *
6722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6723 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6724 * @param cbToSub The number of bytes to subtract.
6725 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6726 * expecting that.
6727 */
6728DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6729{
6730 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6731 pTmpRsp->u -= cbToSub;
6732 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6733 pTmpRsp->DWords.dw0 -= cbToSub;
6734 else
6735 pTmpRsp->Words.w0 -= cbToSub;
6736}
6737
6738
6739/**
6740 * Calculates the effective stack address for a push of the specified size as
6741 * well as the new RSP value (upper bits may be masked).
6742 *
6743 * @returns Effective stack address for the push.
6744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6745 * @param cbItem The size of the stack item to push.
6746 * @param puNewRsp Where to return the new RSP value.
6747 */
6748DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6749{
6750 RTUINT64U uTmpRsp;
6751 RTGCPTR GCPtrTop;
6752 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6753
6754 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6755 GCPtrTop = uTmpRsp.u -= cbItem;
6756 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6757 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6758 else
6759 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6760 *puNewRsp = uTmpRsp.u;
6761 return GCPtrTop;
6762}
6763
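/*
 * Illustrative sketch (example only, not compiled): the width handling done by
 * iemRegGetRspForPush, shown for the 16-bit stack segment case only.  Only SP
 * is decremented (wrapping within 64 KiB) and the upper bits of RSP are left
 * untouched; the 32-bit and 64-bit cases work the same way on ESP and RSP.
 * The helper name and plain types are made up for the example.
 */
#if 0
# include <stdint.h>
# include <assert.h>
static uint64_t iemExamplePushSp16(uint64_t uRsp, uint8_t cbItem, uint64_t *puNewRsp)
{
    uint16_t uSp = (uint16_t)(uRsp - cbItem);           /* only SP participates, wraps at 64 KiB */
    *puNewRsp = (uRsp & ~UINT64_C(0xffff)) | uSp;       /* upper bits of RSP are preserved       */
    return uSp;                                         /* effective (SS-relative) push address  */
}
static void iemExamplePushSp16SelfTest(void)
{
    uint64_t uNewRsp;
    assert(iemExamplePushSp16(UINT64_C(0x00120002), 4, &uNewRsp) == 0xfffe);
    assert(uNewRsp == UINT64_C(0x0012fffe));
}
#endif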
6764
6765/**
6766 * Gets the current stack pointer and calculates the value after a pop of the
6767 * specified size.
6768 *
6769 * @returns Current stack pointer.
6770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6771 * @param cbItem The size of the stack item to pop.
6772 * @param puNewRsp Where to return the new RSP value.
6773 */
6774DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6775{
6776 RTUINT64U uTmpRsp;
6777 RTGCPTR GCPtrTop;
6778 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6779
6780 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6781 {
6782 GCPtrTop = uTmpRsp.u;
6783 uTmpRsp.u += cbItem;
6784 }
6785 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6786 {
6787 GCPtrTop = uTmpRsp.DWords.dw0;
6788 uTmpRsp.DWords.dw0 += cbItem;
6789 }
6790 else
6791 {
6792 GCPtrTop = uTmpRsp.Words.w0;
6793 uTmpRsp.Words.w0 += cbItem;
6794 }
6795 *puNewRsp = uTmpRsp.u;
6796 return GCPtrTop;
6797}
6798
6799
6800/**
6801 * Calculates the effective stack address for a push of the specified size as
6802 * well as the new temporary RSP value (upper bits may be masked).
6803 *
6804 * @returns Effective stack address for the push.
6805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6806 * @param pTmpRsp The temporary stack pointer. This is updated.
6807 * @param cbItem The size of the stack item to push.
6808 */
6809DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6810{
6811 RTGCPTR GCPtrTop;
6812
6813 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6814 GCPtrTop = pTmpRsp->u -= cbItem;
6815 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6816 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6817 else
6818 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6819 return GCPtrTop;
6820}
6821
6822
6823/**
6824 * Gets the effective stack address for a pop of the specified size and
6825 * calculates and updates the temporary RSP.
6826 *
6827 * @returns Current stack pointer.
6828 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6829 * @param pTmpRsp The temporary stack pointer. This is updated.
6830 * @param cbItem The size of the stack item to pop.
6831 */
6832DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6833{
6834 RTGCPTR GCPtrTop;
6835 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6836 {
6837 GCPtrTop = pTmpRsp->u;
6838 pTmpRsp->u += cbItem;
6839 }
6840 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6841 {
6842 GCPtrTop = pTmpRsp->DWords.dw0;
6843 pTmpRsp->DWords.dw0 += cbItem;
6844 }
6845 else
6846 {
6847 GCPtrTop = pTmpRsp->Words.w0;
6848 pTmpRsp->Words.w0 += cbItem;
6849 }
6850 return GCPtrTop;
6851}
6852
6853/** @} */
6854
6855
6856/** @name FPU access and helpers.
6857 *
6858 * @{
6859 */
6860
6861
6862/**
6863 * Hook for preparing to use the host FPU.
6864 *
6865 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6866 *
6867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6868 */
6869DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6870{
6871#ifdef IN_RING3
6872 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6873#else
6874 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6875#endif
6876 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6877}
6878
6879
6880/**
6881 * Hook for preparing to use the host FPU for SSE.
6882 *
6883 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6884 *
6885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6886 */
6887DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6888{
6889 iemFpuPrepareUsage(pVCpu);
6890}
6891
6892
6893/**
6894 * Hook for preparing to use the host FPU for AVX.
6895 *
6896 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6897 *
6898 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6899 */
6900DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6901{
6902 iemFpuPrepareUsage(pVCpu);
6903}
6904
6905
6906/**
6907 * Hook for actualizing the guest FPU state before the interpreter reads it.
6908 *
6909 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6910 *
6911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6912 */
6913DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6914{
6915#ifdef IN_RING3
6916 NOREF(pVCpu);
6917#else
6918 CPUMRZFpuStateActualizeForRead(pVCpu);
6919#endif
6920 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6921}
6922
6923
6924/**
6925 * Hook for actualizing the guest FPU state before the interpreter changes it.
6926 *
6927 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6928 *
6929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6930 */
6931DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6932{
6933#ifdef IN_RING3
6934 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6935#else
6936 CPUMRZFpuStateActualizeForChange(pVCpu);
6937#endif
6938 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6939}
6940
6941
6942/**
6943 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6944 * only.
6945 *
6946 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6947 *
6948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6949 */
6950DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6951{
6952#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6953 NOREF(pVCpu);
6954#else
6955 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6956#endif
6957 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6958}
6959
6960
6961/**
6962 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6963 * read+write.
6964 *
6965 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6966 *
6967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6968 */
6969DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6970{
6971#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6972 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6973#else
6974 CPUMRZFpuStateActualizeForChange(pVCpu);
6975#endif
6976 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6977}
6978
6979
6980/**
6981 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6982 * only.
6983 *
6984 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6985 *
6986 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6987 */
6988DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6989{
6990#ifdef IN_RING3
6991 NOREF(pVCpu);
6992#else
6993 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6994#endif
6995 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6996}
6997
6998
6999/**
7000 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7001 * read+write.
7002 *
7003 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7004 *
7005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7006 */
7007DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7008{
7009#ifdef IN_RING3
7010 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7011#else
7012 CPUMRZFpuStateActualizeForChange(pVCpu);
7013#endif
7014 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7015}
7016
7017
7018/**
7019 * Stores a QNaN value into a FPU register.
7020 *
7021 * @param pReg Pointer to the register.
7022 */
7023DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7024{
7025 pReg->au32[0] = UINT32_C(0x00000000);
7026 pReg->au32[1] = UINT32_C(0xc0000000);
7027 pReg->au16[4] = UINT16_C(0xffff);
7028}
7029
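/*
 * Illustrative sketch (example only, not compiled): the 80-bit value built by
 * iemFpuStoreQNan is the floating-point indefinite QNaN, i.e. sign=1, all
 * exponent bits set, and only the two top mantissa bits set, giving the bytes
 * 00..00 C0 FF FF in little-endian memory order.  The byte-buffer helper below
 * is made up for the example.
 */
#if 0
# include <stdint.h>
# include <string.h>
# include <assert.h>
static void iemExampleStoreQNanIndefinite(uint8_t pab[10])
{
    memset(pab, 0, 10);
    pab[7] = 0xc0;  /* integer (J) bit + top fraction bit => quiet NaN */
    pab[8] = 0xff;  /* exponent, low byte (all ones)                   */
    pab[9] = 0xff;  /* sign bit + exponent, high bits (all ones)       */
}
static void iemExampleStoreQNanSelfTest(void)
{
    uint8_t ab[10];
    iemExampleStoreQNanIndefinite(ab);
    assert(ab[0] == 0 && ab[7] == 0xc0 && ab[8] == 0xff && ab[9] == 0xff);
}
#endif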
7030
7031/**
7032 * Updates the FOP, FPU.CS and FPUIP registers.
7033 *
7034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7035 * @param pFpuCtx The FPU context.
7036 */
7037DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7038{
7039 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7040 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7041 /** @todo x87.CS and FPUIP need to be kept separately. */
7042 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7043 {
7044 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7045 * happens in real mode here based on the fnsave and fnstenv images. */
7046 pFpuCtx->CS = 0;
7047 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7048 }
7049 else
7050 {
7051 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7052 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7053 }
7054}
7055
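/*
 * Illustrative sketch (example only, not compiled): the real/V86-mode path of
 * the worker above stores 0 as the FPU CS value and folds the selector into
 * FPUIP as EIP | (CS << 4).  This coincides with the linear address CS*16 + IP
 * whenever the shifted selector and the offset share no set bits, as in the
 * example below; the helper name is made up.
 */
#if 0
# include <stdint.h>
# include <assert.h>
static uint32_t iemExampleRealModeFpuIp(uint16_t uCs, uint32_t uEip)
{
    return uEip | ((uint32_t)uCs << 4);
}
static void iemExampleRealModeFpuIpSelfTest(void)
{
    /* cs:ip = f000:1234 -> 000f1234 (same as 0xf000 * 16 + 0x1234 here). */
    assert(iemExampleRealModeFpuIp(0xf000, 0x1234) == UINT32_C(0x000f1234));
}
#endif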
7056
7057/**
7058 * Updates the x87.DS and FPUDP registers.
7059 *
7060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7061 * @param pFpuCtx The FPU context.
7062 * @param iEffSeg The effective segment register.
7063 * @param GCPtrEff The effective address relative to @a iEffSeg.
7064 */
7065DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7066{
7067 RTSEL sel;
7068 switch (iEffSeg)
7069 {
7070 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7071 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7072 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7073 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7074 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7075 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7076 default:
7077 AssertMsgFailed(("%d\n", iEffSeg));
7078 sel = pVCpu->cpum.GstCtx.ds.Sel;
7079 }
7080 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7081 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7082 {
7083 pFpuCtx->DS = 0;
7084 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7085 }
7086 else
7087 {
7088 pFpuCtx->DS = sel;
7089 pFpuCtx->FPUDP = GCPtrEff;
7090 }
7091}
7092
7093
7094/**
7095 * Rotates the stack registers in the push direction.
7096 *
7097 * @param pFpuCtx The FPU context.
7098 * @remarks This is a complete waste of time, but fxsave stores the registers in
7099 * stack order.
7100 */
7101DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7102{
7103 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7104 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7105 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7106 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7107 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7108 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7109 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7110 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7111 pFpuCtx->aRegs[0].r80 = r80Tmp;
7112}
7113
7114
7115/**
7116 * Rotates the stack registers in the pop direction.
7117 *
7118 * @param pFpuCtx The FPU context.
7119 * @remarks This is a complete waste of time, but fxsave stores the registers in
7120 * stack order.
7121 */
7122DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7123{
7124 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7125 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7126 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7127 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7128 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7129 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7130 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7131 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7132 pFpuCtx->aRegs[7].r80 = r80Tmp;
7133}
7134
7135
7136/**
7137 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7138 * exception prevents it.
7139 *
7140 * @param pResult The FPU operation result to push.
7141 * @param pFpuCtx The FPU context.
7142 */
7143IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7144{
7145 /* Update FSW and bail if there are pending exceptions afterwards. */
7146 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7147 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7148 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7149 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7150 {
7151 pFpuCtx->FSW = fFsw;
7152 return;
7153 }
7154
7155 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7156 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7157 {
7158 /* All is fine, push the actual value. */
7159 pFpuCtx->FTW |= RT_BIT(iNewTop);
7160 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7161 }
7162 else if (pFpuCtx->FCW & X86_FCW_IM)
7163 {
7164 /* Masked stack overflow, push QNaN. */
7165 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7166 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7167 }
7168 else
7169 {
7170 /* Raise stack overflow, don't push anything. */
7171 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7172 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7173 return;
7174 }
7175
7176 fFsw &= ~X86_FSW_TOP_MASK;
7177 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7178 pFpuCtx->FSW = fFsw;
7179
7180 iemFpuRotateStackPush(pFpuCtx);
7181}
7182
7183
7184/**
7185 * Stores a result in a FPU register and updates the FSW and FTW.
7186 *
7187 * @param pFpuCtx The FPU context.
7188 * @param pResult The result to store.
7189 * @param iStReg Which FPU register to store it in.
7190 */
7191IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7192{
7193 Assert(iStReg < 8);
7194 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7195 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7196 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7197 pFpuCtx->FTW |= RT_BIT(iReg);
7198 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7199}
7200
7201
7202/**
7203 * Only updates the FPU status word (FSW) with the result of the current
7204 * instruction.
7205 *
7206 * @param pFpuCtx The FPU context.
7207 * @param u16FSW The FSW output of the current instruction.
7208 */
7209IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7210{
7211 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7212 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7213}
7214
7215
7216/**
7217 * Pops one item off the FPU stack if no pending exception prevents it.
7218 *
7219 * @param pFpuCtx The FPU context.
7220 */
7221IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7222{
7223 /* Check pending exceptions. */
7224 uint16_t uFSW = pFpuCtx->FSW;
7225 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7226 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7227 return;
7228
7229 /* TOP++ (a pop increments TOP, modulo 8). */
7230 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7231 uFSW &= ~X86_FSW_TOP_MASK;
7232 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7233 pFpuCtx->FSW = uFSW;
7234
7235 /* Mark the previous ST0 as empty. */
7236 iOldTop >>= X86_FSW_TOP_SHIFT;
7237 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7238
7239 /* Rotate the registers. */
7240 iemFpuRotateStackPop(pFpuCtx);
7241}
7242
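/*
 * Illustrative sketch (example only, not compiled): the FSW.TOP arithmetic
 * used by the pop/push helpers.  TOP occupies bits 11..13 of FSW, so adding
 * 9 (or 7) inside the masked field is the branch-free way of computing
 * TOP+1 (or TOP-1) modulo 8.  The EX_* constants mirror the architectural
 * layout and are made-up names for this example.
 */
#if 0
# include <stdint.h>
# include <assert.h>
# define EX_FSW_TOP_SHIFT   11
# define EX_FSW_TOP_MASK    UINT16_C(0x3800)
static uint16_t iemExampleFswTopAfterPop(uint16_t uFsw)  /* TOP = (TOP + 1) & 7 */
{
    uint16_t uTop = uFsw & EX_FSW_TOP_MASK;
    return (uint16_t)((uFsw & ~EX_FSW_TOP_MASK) | ((uTop + (9U << EX_FSW_TOP_SHIFT)) & EX_FSW_TOP_MASK));
}
static void iemExampleFswTopSelfTest(void)
{
    assert(iemExampleFswTopAfterPop((uint16_t)(7U << EX_FSW_TOP_SHIFT)) == 0);                                  /* 7 -> 0 */
    assert(iemExampleFswTopAfterPop((uint16_t)(3U << EX_FSW_TOP_SHIFT)) == (uint16_t)(4U << EX_FSW_TOP_SHIFT)); /* 3 -> 4 */
}
#endif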
7243
7244/**
7245 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7246 *
7247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7248 * @param pResult The FPU operation result to push.
7249 */
7250IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7251{
7252 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7253 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7254 iemFpuMaybePushResult(pResult, pFpuCtx);
7255}
7256
7257
7258/**
7259 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7260 * and sets FPUDP and FPUDS.
7261 *
7262 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7263 * @param pResult The FPU operation result to push.
7264 * @param iEffSeg The effective segment register.
7265 * @param GCPtrEff The effective address relative to @a iEffSeg.
7266 */
7267IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7268{
7269 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7270 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7271 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7272 iemFpuMaybePushResult(pResult, pFpuCtx);
7273}
7274
7275
7276/**
7277 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7278 * unless a pending exception prevents it.
7279 *
7280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7281 * @param pResult The FPU operation result to store and push.
7282 */
7283IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7284{
7285 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7286 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7287
7288 /* Update FSW and bail if there are pending exceptions afterwards. */
7289 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7290 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7291 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7292 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7293 {
7294 pFpuCtx->FSW = fFsw;
7295 return;
7296 }
7297
7298 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7299 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7300 {
7301 /* All is fine, push the actual value. */
7302 pFpuCtx->FTW |= RT_BIT(iNewTop);
7303 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7304 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7305 }
7306 else if (pFpuCtx->FCW & X86_FCW_IM)
7307 {
7308 /* Masked stack overflow, push QNaN. */
7309 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7310 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7311 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7312 }
7313 else
7314 {
7315 /* Raise stack overflow, don't push anything. */
7316 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7317 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7318 return;
7319 }
7320
7321 fFsw &= ~X86_FSW_TOP_MASK;
7322 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7323 pFpuCtx->FSW = fFsw;
7324
7325 iemFpuRotateStackPush(pFpuCtx);
7326}
7327
7328
7329/**
7330 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7331 * FOP.
7332 *
7333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7334 * @param pResult The result to store.
7335 * @param iStReg Which FPU register to store it in.
7336 */
7337IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7338{
7339 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7340 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7341 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7342}
7343
7344
7345/**
7346 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7347 * FOP, and then pops the stack.
7348 *
7349 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7350 * @param pResult The result to store.
7351 * @param iStReg Which FPU register to store it in.
7352 */
7353IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7354{
7355 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7356 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7357 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7358 iemFpuMaybePopOne(pFpuCtx);
7359}
7360
7361
7362/**
7363 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7364 * FPUDP, and FPUDS.
7365 *
7366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7367 * @param pResult The result to store.
7368 * @param iStReg Which FPU register to store it in.
7369 * @param iEffSeg The effective memory operand selector register.
7370 * @param GCPtrEff The effective memory operand offset.
7371 */
7372IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7373 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7374{
7375 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7376 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7377 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7378 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7379}
7380
7381
7382/**
7383 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7384 * FPUDP, and FPUDS, and then pops the stack.
7385 *
7386 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7387 * @param pResult The result to store.
7388 * @param iStReg Which FPU register to store it in.
7389 * @param iEffSeg The effective memory operand selector register.
7390 * @param GCPtrEff The effective memory operand offset.
7391 */
7392IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7393 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7394{
7395 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7396 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7397 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7398 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7399 iemFpuMaybePopOne(pFpuCtx);
7400}
7401
7402
7403/**
7404 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7405 *
7406 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7407 */
7408IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7409{
7410 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7411 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7412}
7413
7414
7415/**
7416 * Marks the specified stack register as free (for FFREE).
7417 *
7418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7419 * @param iStReg The register to free.
7420 */
7421IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7422{
7423 Assert(iStReg < 8);
7424 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7425 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7426 pFpuCtx->FTW &= ~RT_BIT(iReg);
7427}
7428
7429
7430/**
7431 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7432 *
7433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7434 */
7435IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7436{
7437 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7438 uint16_t uFsw = pFpuCtx->FSW;
7439 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7440 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7441 uFsw &= ~X86_FSW_TOP_MASK;
7442 uFsw |= uTop;
7443 pFpuCtx->FSW = uFsw;
7444}
7445
7446
7447/**
7448 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7449 *
7450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7451 */
7452IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7453{
7454 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7455 uint16_t uFsw = pFpuCtx->FSW;
7456 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7457 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7458 uFsw &= ~X86_FSW_TOP_MASK;
7459 uFsw |= uTop;
7460 pFpuCtx->FSW = uFsw;
7461}
7462
7463
7464/**
7465 * Updates the FSW, FOP, FPUIP, and FPUCS.
7466 *
7467 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7468 * @param u16FSW The FSW from the current instruction.
7469 */
7470IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7471{
7472 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7473 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7474 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7475}
7476
7477
7478/**
7479 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7480 *
7481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7482 * @param u16FSW The FSW from the current instruction.
7483 */
7484IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7485{
7486 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7487 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7488 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7489 iemFpuMaybePopOne(pFpuCtx);
7490}
7491
7492
7493/**
7494 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7495 *
7496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7497 * @param u16FSW The FSW from the current instruction.
7498 * @param iEffSeg The effective memory operand selector register.
7499 * @param GCPtrEff The effective memory operand offset.
7500 */
7501IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7502{
7503 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7504 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7505 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7506 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7507}
7508
7509
7510/**
7511 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7512 *
7513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7514 * @param u16FSW The FSW from the current instruction.
7515 */
7516IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7517{
7518 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7519 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7520 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7521 iemFpuMaybePopOne(pFpuCtx);
7522 iemFpuMaybePopOne(pFpuCtx);
7523}
7524
7525
7526/**
7527 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7528 *
7529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7530 * @param u16FSW The FSW from the current instruction.
7531 * @param iEffSeg The effective memory operand selector register.
7532 * @param GCPtrEff The effective memory operand offset.
7533 */
7534IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7535{
7536 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7537 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7538 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7539 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7540 iemFpuMaybePopOne(pFpuCtx);
7541}
7542
7543
7544/**
7545 * Worker routine for raising an FPU stack underflow exception.
7546 *
7547 * @param pFpuCtx The FPU context.
7548 * @param iStReg The stack register being accessed.
7549 */
7550IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7551{
7552 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7553 if (pFpuCtx->FCW & X86_FCW_IM)
7554 {
7555 /* Masked underflow. */
7556 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7557 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7558 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7559 if (iStReg != UINT8_MAX)
7560 {
7561 pFpuCtx->FTW |= RT_BIT(iReg);
7562 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7563 }
7564 }
7565 else
7566 {
7567 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7568 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7569 }
7570}
7571
7572
7573/**
7574 * Raises a FPU stack underflow exception.
7575 *
7576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7577 * @param iStReg The destination register that should be loaded
7578 * with QNaN if \#IS is not masked. Specify
7579 * UINT8_MAX if none (like for fcom).
7580 */
7581DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7582{
7583 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7584 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7585 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7586}
7587
7588
7589DECL_NO_INLINE(IEM_STATIC, void)
7590iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7591{
7592 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7593 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7594 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7595 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7596}
7597
7598
7599DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7600{
7601 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7602 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7603 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7604 iemFpuMaybePopOne(pFpuCtx);
7605}
7606
7607
7608DECL_NO_INLINE(IEM_STATIC, void)
7609iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7610{
7611 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7612 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7613 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7614 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7615 iemFpuMaybePopOne(pFpuCtx);
7616}
7617
7618
7619DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7620{
7621 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7622 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7623 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7624 iemFpuMaybePopOne(pFpuCtx);
7625 iemFpuMaybePopOne(pFpuCtx);
7626}
7627
7628
7629DECL_NO_INLINE(IEM_STATIC, void)
7630iemFpuStackPushUnderflow(PVMCPU pVCpu)
7631{
7632 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7633 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7634
7635 if (pFpuCtx->FCW & X86_FCW_IM)
7636 {
7637 /* Masked underflow - push QNaN. */
7638 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7639 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7640 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7641 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7642 pFpuCtx->FTW |= RT_BIT(iNewTop);
7643 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7644 iemFpuRotateStackPush(pFpuCtx);
7645 }
7646 else
7647 {
7648 /* Exception pending - don't change TOP or the register stack. */
7649 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7650 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7651 }
7652}
7653
7654
7655DECL_NO_INLINE(IEM_STATIC, void)
7656iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7657{
7658 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7659 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7660
7661 if (pFpuCtx->FCW & X86_FCW_IM)
7662 {
7663 /* Masked underflow - push QNaN. */
7664 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7665 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7666 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7667 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7668 pFpuCtx->FTW |= RT_BIT(iNewTop);
7669 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7670 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7671 iemFpuRotateStackPush(pFpuCtx);
7672 }
7673 else
7674 {
7675 /* Exception pending - don't change TOP or the register stack. */
7676 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7677 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7678 }
7679}
7680
7681
7682/**
7683 * Worker routine for raising an FPU stack overflow exception on a push.
7684 *
7685 * @param pFpuCtx The FPU context.
7686 */
7687IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7688{
7689 if (pFpuCtx->FCW & X86_FCW_IM)
7690 {
7691 /* Masked overflow. */
7692 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7693 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7694 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7695 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7696 pFpuCtx->FTW |= RT_BIT(iNewTop);
7697 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7698 iemFpuRotateStackPush(pFpuCtx);
7699 }
7700 else
7701 {
7702 /* Exception pending - don't change TOP or the register stack. */
7703 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7704 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7705 }
7706}
7707
7708
7709/**
7710 * Raises a FPU stack overflow exception on a push.
7711 *
7712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7713 */
7714DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7715{
7716 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7717 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7718 iemFpuStackPushOverflowOnly(pFpuCtx);
7719}
7720
7721
7722/**
7723 * Raises a FPU stack overflow exception on a push with a memory operand.
7724 *
7725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7726 * @param iEffSeg The effective memory operand selector register.
7727 * @param GCPtrEff The effective memory operand offset.
7728 */
7729DECL_NO_INLINE(IEM_STATIC, void)
7730iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7731{
7732 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7733 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7734 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7735 iemFpuStackPushOverflowOnly(pFpuCtx);
7736}
7737
7738
7739IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7740{
7741 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7742 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7743 if (pFpuCtx->FTW & RT_BIT(iReg))
7744 return VINF_SUCCESS;
7745 return VERR_NOT_FOUND;
7746}
7747
7748
7749IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7750{
7751 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7752 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7753 if (pFpuCtx->FTW & RT_BIT(iReg))
7754 {
7755 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7756 return VINF_SUCCESS;
7757 }
7758 return VERR_NOT_FOUND;
7759}
7760
7761
7762IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7763 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7764{
7765 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7766 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7767 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7768 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7769 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7770 {
7771 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7772 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7773 return VINF_SUCCESS;
7774 }
7775 return VERR_NOT_FOUND;
7776}
7777
7778
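/**
 * Checks whether two FPU stack registers are both occupied, returning a
 * read-only reference to the first one only.
 *
 * @returns VINF_SUCCESS if both registers are not empty, VERR_NOT_FOUND otherwise.
 * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
 * @param   iStReg0  The first stack register (ST) index.
 * @param   ppRef0   Where to return the first register value reference.
 * @param   iStReg1  The second stack register (ST) index.
 */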
7779IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7780{
7781 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7782 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7783 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7784 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7785 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7786 {
7787 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7788 return VINF_SUCCESS;
7789 }
7790 return VERR_NOT_FOUND;
7791}
7792
7793
7794/**
7795 * Updates the FPU exception status after FCW is changed.
7796 *
7797 * @param pFpuCtx The FPU context.
7798 */
7799IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7800{
7801 uint16_t u16Fsw = pFpuCtx->FSW;
7802 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7803 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7804 else
7805 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7806 pFpuCtx->FSW = u16Fsw;
7807}
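
/*
 * Example of the summary-bit recalculation above (editorial sketch, not part
 * of the original source): a hypothetical helper showing that unmasking a
 * pending exception in FCW sets FSW.ES/FSW.B and masking it again clears them.
 */
#if 0 /* illustration only */
static void iemFpuExampleRecalc(PX86FXSTATE pFpuCtx)
{
    pFpuCtx->FSW |= X86_FSW_IE;             /* pending invalid-operation exception */
    pFpuCtx->FCW &= ~X86_FCW_IM;            /* guest unmasks it (e.g. via FLDCW) */
    iemFpuRecalcExceptionStatus(pFpuCtx);   /* -> ES and B are set */
    pFpuCtx->FCW |= X86_FCW_IM;             /* guest masks it again */
    iemFpuRecalcExceptionStatus(pFpuCtx);   /* -> ES and B are cleared */
}
#endif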
7808
7809
7810/**
7811 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7812 *
7813 * @returns The full FTW.
7814 * @param pFpuCtx The FPU context.
7815 */
7816IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7817{
7818 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7819 uint16_t u16Ftw = 0;
7820 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7821 for (unsigned iSt = 0; iSt < 8; iSt++)
7822 {
7823 unsigned const iReg = (iSt + iTop) & 7;
7824 if (!(u8Ftw & RT_BIT(iReg)))
7825 u16Ftw |= 3 << (iReg * 2); /* empty */
7826 else
7827 {
7828 uint16_t uTag;
7829 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7830 if (pr80Reg->s.uExponent == 0x7fff)
7831 uTag = 2; /* Exponent is all 1's => Special. */
7832 else if (pr80Reg->s.uExponent == 0x0000)
7833 {
7834 if (pr80Reg->s.u64Mantissa == 0x0000)
7835 uTag = 1; /* All bits are zero => Zero. */
7836 else
7837 uTag = 2; /* Must be special. */
7838 }
7839 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7840 uTag = 0; /* Valid. */
7841 else
7842 uTag = 2; /* Must be special. */
7843
7844 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7845 }
7846 }
7847
7848 return u16Ftw;
7849}
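
/*
 * Tag encoding illustration for the conversion above (editorial note): each
 * register gets a 2-bit tag - 00 = valid, 01 = zero, 10 = special (NaN,
 * infinity, denormal, unsupported), 11 = empty.  E.g. with TOP=7 and only
 * ST(0) holding 1.0, physical register 7 is tagged 00 and the other seven
 * slots 11, giving a full FTW of 0x3fff.
 */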
7850
7851
7852/**
7853 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7854 *
7855 * @returns The compressed FTW.
7856 * @param u16FullFtw The full FTW to convert.
7857 */
7858IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7859{
7860 uint8_t u8Ftw = 0;
7861 for (unsigned i = 0; i < 8; i++)
7862 {
7863 if ((u16FullFtw & 3) != 3 /*empty*/)
7864 u8Ftw |= RT_BIT(i);
7865 u16FullFtw >>= 2;
7866 }
7867
7868 return u8Ftw;
7869}
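
/*
 * Round-trip sketch for the two FTW helpers above (editorial illustration,
 * not part of the original source): compressing keeps one bit per register
 * (0 = empty, 1 = occupied), so expanding and compressing again must give
 * back the original abridged FTW.
 */
#if 0 /* illustration only */
static void iemFpuExampleFtwRoundTrip(PCX86FXSTATE pFpuCtx)
{
    uint16_t const u16Full = iemFpuCalcFullFtw(pFpuCtx);
    uint8_t  const u8Back  = (uint8_t)iemFpuCompressFtw(u16Full);
    Assert(u8Back == (uint8_t)pFpuCtx->FTW);
}
#endif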
7870
7871/** @} */
7872
7873
7874/** @name Memory access.
7875 *
7876 * @{
7877 */
7878
7879
7880/**
7881 * Updates the IEMCPU::cbWritten counter if applicable.
7882 *
7883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7884 * @param fAccess The access being accounted for.
7885 * @param cbMem The access size.
7886 */
7887DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7888{
7889 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7890 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7891 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7892}
7893
7894
7895/**
7896 * Checks if the given segment can be written to, raising the appropriate
7897 * exception if not.
7898 *
7899 * @returns VBox strict status code.
7900 *
7901 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7902 * @param pHid Pointer to the hidden register.
7903 * @param iSegReg The register number.
7904 * @param pu64BaseAddr Where to return the base address to use for the
7905 * segment. (In 64-bit code it may differ from the
7906 * base in the hidden segment.)
7907 */
7908IEM_STATIC VBOXSTRICTRC
7909iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7910{
7911 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7912
7913 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7914 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7915 else
7916 {
7917 if (!pHid->Attr.n.u1Present)
7918 {
7919 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7920 AssertRelease(uSel == 0);
7921 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7922 return iemRaiseGeneralProtectionFault0(pVCpu);
7923 }
7924
7925 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7926 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7927 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7928 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7929 *pu64BaseAddr = pHid->u64Base;
7930 }
7931 return VINF_SUCCESS;
7932}
7933
7934
7935/**
7936 * Checks if the given segment can be read from, raising the appropriate
7937 * exception if not.
7938 *
7939 * @returns VBox strict status code.
7940 *
7941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7942 * @param pHid Pointer to the hidden register.
7943 * @param iSegReg The register number.
7944 * @param pu64BaseAddr Where to return the base address to use for the
7945 * segment. (In 64-bit code it may differ from the
7946 * base in the hidden segment.)
7947 */
7948IEM_STATIC VBOXSTRICTRC
7949iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7950{
7951 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7952
7953 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7954 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7955 else
7956 {
7957 if (!pHid->Attr.n.u1Present)
7958 {
7959 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7960 AssertRelease(uSel == 0);
7961 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7962 return iemRaiseGeneralProtectionFault0(pVCpu);
7963 }
7964
7965 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7966 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7967 *pu64BaseAddr = pHid->u64Base;
7968 }
7969 return VINF_SUCCESS;
7970}
7971
7972
7973/**
7974 * Applies the segment limit, base and attributes.
7975 *
7976 * This may raise a \#GP or \#SS.
7977 *
7978 * @returns VBox strict status code.
7979 *
7980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7981 * @param fAccess The kind of access which is being performed.
7982 * @param iSegReg The index of the segment register to apply.
7983 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7984 * TSS, ++).
7985 * @param cbMem The access size.
7986 * @param pGCPtrMem Pointer to the guest memory address to apply
7987 * segmentation to. Input and output parameter.
7988 */
7989IEM_STATIC VBOXSTRICTRC
7990iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7991{
7992 if (iSegReg == UINT8_MAX)
7993 return VINF_SUCCESS;
7994
7995 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7996 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7997 switch (pVCpu->iem.s.enmCpuMode)
7998 {
7999 case IEMMODE_16BIT:
8000 case IEMMODE_32BIT:
8001 {
8002 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8003 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8004
8005 if ( pSel->Attr.n.u1Present
8006 && !pSel->Attr.n.u1Unusable)
8007 {
8008 Assert(pSel->Attr.n.u1DescType);
8009 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8010 {
8011 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8012 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8013 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8014
8015 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8016 {
8017 /** @todo CPL check. */
8018 }
8019
8020 /*
8021 * There are two kinds of data selectors, normal and expand down.
8022 */
8023 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8024 {
8025 if ( GCPtrFirst32 > pSel->u32Limit
8026 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8027 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8028 }
8029 else
8030 {
8031 /*
8032 * The upper boundary is defined by the B bit, not the G bit!
8033 */
8034 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8035 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8036 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8037 }
8038 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8039 }
8040 else
8041 {
8042
8043 /*
8044 * A code selector can usually be used to read through it; writing is
8045 * only permitted in real and V8086 mode.
8046 */
8047 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8048 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8049 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8050 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8051 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8052
8053 if ( GCPtrFirst32 > pSel->u32Limit
8054 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8055 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8056
8057 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8058 {
8059 /** @todo CPL check. */
8060 }
8061
8062 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8063 }
8064 }
8065 else
8066 return iemRaiseGeneralProtectionFault0(pVCpu);
8067 return VINF_SUCCESS;
8068 }
8069
8070 case IEMMODE_64BIT:
8071 {
8072 RTGCPTR GCPtrMem = *pGCPtrMem;
8073 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8074 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8075
8076 Assert(cbMem >= 1);
8077 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8078 return VINF_SUCCESS;
8079 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8080 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8081 return iemRaiseGeneralProtectionFault0(pVCpu);
8082 }
8083
8084 default:
8085 AssertFailedReturn(VERR_IEM_IPE_7);
8086 }
8087}
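
/*
 * Worked example for the expand-down check above (editorial note): take a
 * 32-bit expand-down data segment with limit 0x0fff and D/B=1.  Valid
 * offsets are 0x1000..0xffffffff, so a 4-byte access at 0x0ffe fails the
 * "GCPtrFirst32 < limit + 1" test and raises a bounds fault, while the same
 * access at 0x1000 passes both tests (last byte 0x1003 <= 0xffffffff).
 */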
8088
8089
8090/**
8091 * Translates a virtual address to a physical address and checks if we
8092 * can access the page as specified.
8093 *
8094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8095 * @param GCPtrMem The virtual address.
8096 * @param fAccess The intended access.
8097 * @param pGCPhysMem Where to return the physical address.
8098 */
8099IEM_STATIC VBOXSTRICTRC
8100iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8101{
8102 /** @todo Need a different PGM interface here. We're currently using
8103 * generic / REM interfaces. this won't cut it for R0 & RC. */
8104 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8105 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8106 RTGCPHYS GCPhys;
8107 uint64_t fFlags;
8108 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8109 if (RT_FAILURE(rc))
8110 {
8111 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8112 /** @todo Check unassigned memory in unpaged mode. */
8113 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8114 *pGCPhysMem = NIL_RTGCPHYS;
8115 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8116 }
8117
8118 /* If the page is writable, user accessible and does not have the no-exec
8119 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
8120 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8121 {
8122 /* Write to read only memory? */
8123 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8124 && !(fFlags & X86_PTE_RW)
8125 && ( (pVCpu->iem.s.uCpl == 3
8126 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8127 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8128 {
8129 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8130 *pGCPhysMem = NIL_RTGCPHYS;
8131 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8132 }
8133
8134 /* Kernel memory accessed by userland? */
8135 if ( !(fFlags & X86_PTE_US)
8136 && pVCpu->iem.s.uCpl == 3
8137 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8138 {
8139 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8140 *pGCPhysMem = NIL_RTGCPHYS;
8141 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8142 }
8143
8144 /* Executing non-executable memory? */
8145 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8146 && (fFlags & X86_PTE_PAE_NX)
8147 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8148 {
8149 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8150 *pGCPhysMem = NIL_RTGCPHYS;
8151 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8152 VERR_ACCESS_DENIED);
8153 }
8154 }
8155
8156 /*
8157 * Set the dirty / accessed flags.
8158 * ASSUMES this is set when the address is translated rather than on commit...
8159 */
8160 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8161 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8162 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8163 {
8164 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8165 AssertRC(rc2);
8166 }
8167
8168 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8169 *pGCPhysMem = GCPhys;
8170 return VINF_SUCCESS;
8171}
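
/*
 * Accessed/dirty handling sketch for the translation above (editorial
 * illustration, not part of the original source): a read only requires the
 * A bit, a write requires both A and D, and PGMGstModifyPage is only called
 * when some required bit is still clear in the PTE flags.
 */
#if 0 /* illustration only */
static bool iemExampleNeedsAccessedDirtyUpdate(uint64_t fPteFlags, uint32_t fAccess)
{
    uint32_t const fAccessedDirty = (fAccess & IEM_ACCESS_TYPE_WRITE)
                                  ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
    return (fPteFlags & fAccessedDirty) != fAccessedDirty;
}
#endif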
8172
8173
8174
8175/**
8176 * Maps a physical page.
8177 *
8178 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8180 * @param GCPhysMem The physical address.
8181 * @param fAccess The intended access.
8182 * @param ppvMem Where to return the mapping address.
8183 * @param pLock The PGM lock.
8184 */
8185IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8186{
8187#ifdef IEM_LOG_MEMORY_WRITES
8188 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8189 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8190#endif
8191
8192 /** @todo This API may require some improving later. A private deal with PGM
8193 * regarding locking and unlocking needs to be struck. A couple of TLBs
8194 * living in PGM, but with publicly accessible inlined access methods
8195 * could perhaps be an even better solution. */
8196 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8197 GCPhysMem,
8198 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8199 pVCpu->iem.s.fBypassHandlers,
8200 ppvMem,
8201 pLock);
8202 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8203 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8204
8205 return rc;
8206}
8207
8208
8209/**
8210 * Unmap a page previously mapped by iemMemPageMap.
8211 *
8212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8213 * @param GCPhysMem The physical address.
8214 * @param fAccess The intended access.
8215 * @param pvMem What iemMemPageMap returned.
8216 * @param pLock The PGM lock.
8217 */
8218DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8219{
8220 NOREF(pVCpu);
8221 NOREF(GCPhysMem);
8222 NOREF(fAccess);
8223 NOREF(pvMem);
8224 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8225}
8226
8227
8228/**
8229 * Looks up a memory mapping entry.
8230 *
8231 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8233 * @param pvMem The memory address.
8234 * @param fAccess The kind of access to look for.
8235 */
8236DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8237{
8238 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8239 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8240 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8241 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8242 return 0;
8243 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8244 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8245 return 1;
8246 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8247 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8248 return 2;
8249 return VERR_NOT_FOUND;
8250}
8251
8252
8253/**
8254 * Finds a free memmap entry when using iNextMapping doesn't work.
8255 *
8256 * @returns Memory mapping index, 1024 on failure.
8257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8258 */
8259IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8260{
8261 /*
8262 * The easy case.
8263 */
8264 if (pVCpu->iem.s.cActiveMappings == 0)
8265 {
8266 pVCpu->iem.s.iNextMapping = 1;
8267 return 0;
8268 }
8269
8270 /* There should be enough mappings for all instructions. */
8271 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8272
8273 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8274 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8275 return i;
8276
8277 AssertFailedReturn(1024);
8278}
8279
8280
8281/**
8282 * Commits a bounce buffer that needs writing back and unmaps it.
8283 *
8284 * @returns Strict VBox status code.
8285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8286 * @param iMemMap The index of the buffer to commit.
8287 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8288 * Always false in ring-3, obviously.
8289 */
8290IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8291{
8292 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8293 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8294#ifdef IN_RING3
8295 Assert(!fPostponeFail);
8296 RT_NOREF_PV(fPostponeFail);
8297#endif
8298
8299 /*
8300 * Do the writing.
8301 */
8302 PVM pVM = pVCpu->CTX_SUFF(pVM);
8303 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8304 {
8305 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8306 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8307 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8308 if (!pVCpu->iem.s.fBypassHandlers)
8309 {
8310 /*
8311 * Carefully and efficiently dealing with access handler return
8312 * codes makes this a little bloated.
8313 */
8314 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8315 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8316 pbBuf,
8317 cbFirst,
8318 PGMACCESSORIGIN_IEM);
8319 if (rcStrict == VINF_SUCCESS)
8320 {
8321 if (cbSecond)
8322 {
8323 rcStrict = PGMPhysWrite(pVM,
8324 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8325 pbBuf + cbFirst,
8326 cbSecond,
8327 PGMACCESSORIGIN_IEM);
8328 if (rcStrict == VINF_SUCCESS)
8329 { /* nothing */ }
8330 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8331 {
8332 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8333 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8334 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8335 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8336 }
8337#ifndef IN_RING3
8338 else if (fPostponeFail)
8339 {
8340 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8341 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8342 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8343 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8344 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8345 return iemSetPassUpStatus(pVCpu, rcStrict);
8346 }
8347#endif
8348 else
8349 {
8350 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8351 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8352 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8353 return rcStrict;
8354 }
8355 }
8356 }
8357 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8358 {
8359 if (!cbSecond)
8360 {
8361 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8362 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8363 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8364 }
8365 else
8366 {
8367 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8368 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8369 pbBuf + cbFirst,
8370 cbSecond,
8371 PGMACCESSORIGIN_IEM);
8372 if (rcStrict2 == VINF_SUCCESS)
8373 {
8374 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8375 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8376 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8377 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8378 }
8379 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8380 {
8381 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8382 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8383 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8384 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8385 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8386 }
8387#ifndef IN_RING3
8388 else if (fPostponeFail)
8389 {
8390 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8391 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8392 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8393 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8394 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8395 return iemSetPassUpStatus(pVCpu, rcStrict);
8396 }
8397#endif
8398 else
8399 {
8400 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8401 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8402 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8403 return rcStrict2;
8404 }
8405 }
8406 }
8407#ifndef IN_RING3
8408 else if (fPostponeFail)
8409 {
8410 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8411 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8412 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8413 if (!cbSecond)
8414 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8415 else
8416 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8417 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8418 return iemSetPassUpStatus(pVCpu, rcStrict);
8419 }
8420#endif
8421 else
8422 {
8423 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8425 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8426 return rcStrict;
8427 }
8428 }
8429 else
8430 {
8431 /*
8432 * No access handlers, much simpler.
8433 */
8434 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8435 if (RT_SUCCESS(rc))
8436 {
8437 if (cbSecond)
8438 {
8439 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8440 if (RT_SUCCESS(rc))
8441 { /* likely */ }
8442 else
8443 {
8444 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8445 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8446 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8447 return rc;
8448 }
8449 }
8450 }
8451 else
8452 {
8453 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8454 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8455 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8456 return rc;
8457 }
8458 }
8459 }
8460
8461#if defined(IEM_LOG_MEMORY_WRITES)
8462 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8463 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8464 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8465 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8466 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8467 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8468
8469 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8470 g_cbIemWrote = cbWrote;
8471 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8472#endif
8473
8474 /*
8475 * Free the mapping entry.
8476 */
8477 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8478 Assert(pVCpu->iem.s.cActiveMappings != 0);
8479 pVCpu->iem.s.cActiveMappings--;
8480 return VINF_SUCCESS;
8481}
8482
8483
8484/**
8485 * iemMemMap worker that deals with a request crossing pages.
8486 */
8487IEM_STATIC VBOXSTRICTRC
8488iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8489{
8490 /*
8491 * Do the address translations.
8492 */
8493 RTGCPHYS GCPhysFirst;
8494 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8495 if (rcStrict != VINF_SUCCESS)
8496 return rcStrict;
8497
8498 RTGCPHYS GCPhysSecond;
8499 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8500 fAccess, &GCPhysSecond);
8501 if (rcStrict != VINF_SUCCESS)
8502 return rcStrict;
8503 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8504
8505 PVM pVM = pVCpu->CTX_SUFF(pVM);
8506
8507 /*
8508 * Read in the current memory content if it's a read, execute or partial
8509 * write access.
8510 */
8511 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8512 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8513 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8514
8515 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8516 {
8517 if (!pVCpu->iem.s.fBypassHandlers)
8518 {
8519 /*
8520 * We must carefully deal with access handler status codes here,
8521 * which makes the code a bit bloated.
8522 */
8523 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8524 if (rcStrict == VINF_SUCCESS)
8525 {
8526 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8527 if (rcStrict == VINF_SUCCESS)
8528 { /*likely */ }
8529 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8530 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8531 else
8532 {
8533 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8534 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8535 return rcStrict;
8536 }
8537 }
8538 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8539 {
8540 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8541 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8542 {
8543 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8544 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8545 }
8546 else
8547 {
8548 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8549 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8550 return rcStrict2;
8551 }
8552 }
8553 else
8554 {
8555 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8556 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8557 return rcStrict;
8558 }
8559 }
8560 else
8561 {
8562 /*
8563 * No informational status codes here, much more straightforward.
8564 */
8565 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8566 if (RT_SUCCESS(rc))
8567 {
8568 Assert(rc == VINF_SUCCESS);
8569 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8570 if (RT_SUCCESS(rc))
8571 Assert(rc == VINF_SUCCESS);
8572 else
8573 {
8574 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8575 return rc;
8576 }
8577 }
8578 else
8579 {
8580 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8581 return rc;
8582 }
8583 }
8584 }
8585#ifdef VBOX_STRICT
8586 else
8587 memset(pbBuf, 0xcc, cbMem);
8588 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8589 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8590#endif
8591
8592 /*
8593 * Commit the bounce buffer entry.
8594 */
8595 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8596 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8597 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8598 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8599 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8600 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8601 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8602 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8603 pVCpu->iem.s.cActiveMappings++;
8604
8605 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8606 *ppvMem = pbBuf;
8607 return VINF_SUCCESS;
8608}
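
/*
 * Split-size arithmetic for the cross-page helper above (editorial note):
 * e.g. an 8-byte access whose page offset is 0xffc gives
 * cbFirstPage = 0x1000 - 0xffc = 4 and cbSecondPage = 8 - 4 = 4, so the two
 * PGMPhysRead/PGMPhysWrite legs together cover exactly cbMem bytes.
 */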
8609
8610
8611/**
8612 * iemMemMap worker that deals with iemMemPageMap failures.
8613 */
8614IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8615 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8616{
8617 /*
8618 * Filter out conditions we can handle and the ones which shouldn't happen.
8619 */
8620 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8621 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8622 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8623 {
8624 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8625 return rcMap;
8626 }
8627 pVCpu->iem.s.cPotentialExits++;
8628
8629 /*
8630 * Read in the current memory content if it's a read, execute or partial
8631 * write access.
8632 */
8633 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8634 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8635 {
8636 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8637 memset(pbBuf, 0xff, cbMem);
8638 else
8639 {
8640 int rc;
8641 if (!pVCpu->iem.s.fBypassHandlers)
8642 {
8643 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8644 if (rcStrict == VINF_SUCCESS)
8645 { /* nothing */ }
8646 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8647 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8648 else
8649 {
8650 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8651 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8652 return rcStrict;
8653 }
8654 }
8655 else
8656 {
8657 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8658 if (RT_SUCCESS(rc))
8659 { /* likely */ }
8660 else
8661 {
8662 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8663 GCPhysFirst, rc));
8664 return rc;
8665 }
8666 }
8667 }
8668 }
8669#ifdef VBOX_STRICT
8670 else
8671 memset(pbBuf, 0xcc, cbMem);
8674 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8675 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8676#endif
8677
8678 /*
8679 * Commit the bounce buffer entry.
8680 */
8681 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8682 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8683 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8684 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8685 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8686 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8687 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8688 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8689 pVCpu->iem.s.cActiveMappings++;
8690
8691 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8692 *ppvMem = pbBuf;
8693 return VINF_SUCCESS;
8694}
8695
8696
8697
8698/**
8699 * Maps the specified guest memory for the given kind of access.
8700 *
8701 * This may be using bounce buffering of the memory if it's crossing a page
8702 * boundary or if there is an access handler installed for any of it. Because
8703 * of lock prefix guarantees, we're in for some extra clutter when this
8704 * happens.
8705 *
8706 * This may raise a \#GP, \#SS, \#PF or \#AC.
8707 *
8708 * @returns VBox strict status code.
8709 *
8710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8711 * @param ppvMem Where to return the pointer to the mapped
8712 * memory.
8713 * @param cbMem The number of bytes to map. This is usually 1,
8714 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8715 * string operations it can be up to a page.
8716 * @param iSegReg The index of the segment register to use for
8717 * this access. The base and limits are checked.
8718 * Use UINT8_MAX to indicate that no segmentation
8719 * is required (for IDT, GDT and LDT accesses).
8720 * @param GCPtrMem The address of the guest memory.
8721 * @param fAccess How the memory is being accessed. The
8722 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8723 * how to map the memory, while the
8724 * IEM_ACCESS_WHAT_XXX bit is used when raising
8725 * exceptions.
8726 */
8727IEM_STATIC VBOXSTRICTRC
8728iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8729{
8730 /*
8731 * Check the input and figure out which mapping entry to use.
8732 */
8733 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8734 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8735 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8736
8737 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8738 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8739 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8740 {
8741 iMemMap = iemMemMapFindFree(pVCpu);
8742 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8743 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8744 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8745 pVCpu->iem.s.aMemMappings[2].fAccess),
8746 VERR_IEM_IPE_9);
8747 }
8748
8749 /*
8750 * Map the memory, checking that we can actually access it. If something
8751 * slightly complicated happens, fall back on bounce buffering.
8752 */
8753 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8754 if (rcStrict != VINF_SUCCESS)
8755 return rcStrict;
8756
8757 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8758 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8759
8760 RTGCPHYS GCPhysFirst;
8761 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8762 if (rcStrict != VINF_SUCCESS)
8763 return rcStrict;
8764
8765 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8766 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8767 if (fAccess & IEM_ACCESS_TYPE_READ)
8768 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8769
8770 void *pvMem;
8771 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8772 if (rcStrict != VINF_SUCCESS)
8773 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8774
8775 /*
8776 * Fill in the mapping table entry.
8777 */
8778 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8779 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8780 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8781 pVCpu->iem.s.cActiveMappings++;
8782
8783 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8784 *ppvMem = pvMem;
8785 return VINF_SUCCESS;
8786}
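
/*
 * Typical caller pattern for iemMemMap + iemMemCommitAndUnmap (editorial
 * sketch modelled on the data fetch helpers later in this section; the
 * function name is hypothetical and not part of the original source):
 */
#if 0 /* illustration only */
static VBOXSTRICTRC iemExampleStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
{
    uint32_t *pu32Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;    /* the write goes to the mapped page or the bounce buffer */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
    }
    return rcStrict;
}
#endif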
8787
8788
8789/**
8790 * Commits the guest memory if bounce buffered and unmaps it.
8791 *
8792 * @returns Strict VBox status code.
8793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8794 * @param pvMem The mapping.
8795 * @param fAccess The kind of access.
8796 */
8797IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8798{
8799 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8800 AssertReturn(iMemMap >= 0, iMemMap);
8801
8802 /* If it's bounce buffered, we may need to write back the buffer. */
8803 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8804 {
8805 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8806 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8807 }
8808 /* Otherwise unlock it. */
8809 else
8810 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8811
8812 /* Free the entry. */
8813 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8814 Assert(pVCpu->iem.s.cActiveMappings != 0);
8815 pVCpu->iem.s.cActiveMappings--;
8816 return VINF_SUCCESS;
8817}
8818
8819#ifdef IEM_WITH_SETJMP
8820
8821/**
8822 * Maps the specified guest memory for the given kind of access, longjmp on
8823 * error.
8824 *
8825 * This may be using bounce buffering of the memory if it's crossing a page
8826 * boundary or if there is an access handler installed for any of it. Because
8827 * of lock prefix guarantees, we're in for some extra clutter when this
8828 * happens.
8829 *
8830 * This may raise a \#GP, \#SS, \#PF or \#AC.
8831 *
8832 * @returns Pointer to the mapped memory.
8833 *
8834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8835 * @param cbMem The number of bytes to map. This is usually 1,
8836 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8837 * string operations it can be up to a page.
8838 * @param iSegReg The index of the segment register to use for
8839 * this access. The base and limits are checked.
8840 * Use UINT8_MAX to indicate that no segmentation
8841 * is required (for IDT, GDT and LDT accesses).
8842 * @param GCPtrMem The address of the guest memory.
8843 * @param fAccess How the memory is being accessed. The
8844 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8845 * how to map the memory, while the
8846 * IEM_ACCESS_WHAT_XXX bit is used when raising
8847 * exceptions.
8848 */
8849IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8850{
8851 /*
8852 * Check the input and figure out which mapping entry to use.
8853 */
8854 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8855 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8856 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8857
8858 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8859 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8860 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8861 {
8862 iMemMap = iemMemMapFindFree(pVCpu);
8863 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8864 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8865 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8866 pVCpu->iem.s.aMemMappings[2].fAccess),
8867 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8868 }
8869
8870 /*
8871 * Map the memory, checking that we can actually access it. If something
8872 * slightly complicated happens, fall back on bounce buffering.
8873 */
8874 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8875 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8876 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8877
8878 /* Crossing a page boundary? */
8879 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8880 { /* No (likely). */ }
8881 else
8882 {
8883 void *pvMem;
8884 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8885 if (rcStrict == VINF_SUCCESS)
8886 return pvMem;
8887 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8888 }
8889
8890 RTGCPHYS GCPhysFirst;
8891 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8892 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8893 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8894
8895 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8896 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8897 if (fAccess & IEM_ACCESS_TYPE_READ)
8898 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8899
8900 void *pvMem;
8901 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8902 if (rcStrict == VINF_SUCCESS)
8903 { /* likely */ }
8904 else
8905 {
8906 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8907 if (rcStrict == VINF_SUCCESS)
8908 return pvMem;
8909 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8910 }
8911
8912 /*
8913 * Fill in the mapping table entry.
8914 */
8915 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8916 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8917 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8918 pVCpu->iem.s.cActiveMappings++;
8919
8920 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8921 return pvMem;
8922}
8923
8924
8925/**
8926 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8927 *
8928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8929 * @param pvMem The mapping.
8930 * @param fAccess The kind of access.
8931 */
8932IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8933{
8934 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8935 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8936
8937 /* If it's bounce buffered, we may need to write back the buffer. */
8938 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8939 {
8940 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8941 {
8942 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8943 if (rcStrict == VINF_SUCCESS)
8944 return;
8945 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8946 }
8947 }
8948 /* Otherwise unlock it. */
8949 else
8950 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8951
8952 /* Free the entry. */
8953 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8954 Assert(pVCpu->iem.s.cActiveMappings != 0);
8955 pVCpu->iem.s.cActiveMappings--;
8956}
8957
8958#endif /* IEM_WITH_SETJMP */
8959
8960#ifndef IN_RING3
8961/**
8962 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8963 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
8964 *
8965 * Allows the instruction to be completed and retired, while the IEM user will
8966 * return to ring-3 immediately afterwards and do the postponed writes there.
8967 *
8968 * @returns VBox status code (no strict statuses). Caller must check
8969 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8971 * @param pvMem The mapping.
8972 * @param fAccess The kind of access.
8973 */
8974IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8975{
8976 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8977 AssertReturn(iMemMap >= 0, iMemMap);
8978
8979 /* If it's bounce buffered, we may need to write back the buffer. */
8980 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8981 {
8982 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8983 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8984 }
8985 /* Otherwise unlock it. */
8986 else
8987 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8988
8989 /* Free the entry. */
8990 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8991 Assert(pVCpu->iem.s.cActiveMappings != 0);
8992 pVCpu->iem.s.cActiveMappings--;
8993 return VINF_SUCCESS;
8994}
8995#endif
8996
8997
8998/**
8999 * Rolls back mappings, releasing page locks and such.
9000 *
9001 * The caller shall only call this after checking cActiveMappings.
9002 *
9004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9005 */
9006IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9007{
9008 Assert(pVCpu->iem.s.cActiveMappings > 0);
9009
9010 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9011 while (iMemMap-- > 0)
9012 {
9013 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9014 if (fAccess != IEM_ACCESS_INVALID)
9015 {
9016 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9017 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9018 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9019 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9020 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9021 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9022 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9023 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9024 pVCpu->iem.s.cActiveMappings--;
9025 }
9026 }
9027}
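
/*
 * Usage sketch for the rollback above (editorial illustration; hypothetical
 * wrapper, the real dispatch code is expected to make an equivalent check):
 */
#if 0 /* illustration only */
static void iemExampleRollbackOnFailure(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
        iemMemRollback(pVCpu);
}
#endif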
9028
9029
9030/**
9031 * Fetches a data byte.
9032 *
9033 * @returns Strict VBox status code.
9034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9035 * @param pu8Dst Where to return the byte.
9036 * @param iSegReg The index of the segment register to use for
9037 * this access. The base and limits are checked.
9038 * @param GCPtrMem The address of the guest memory.
9039 */
9040IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9041{
9042 /* The lazy approach for now... */
9043 uint8_t const *pu8Src;
9044 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9045 if (rc == VINF_SUCCESS)
9046 {
9047 *pu8Dst = *pu8Src;
9048 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9049 }
9050 return rc;
9051}
9052
9053
9054#ifdef IEM_WITH_SETJMP
9055/**
9056 * Fetches a data byte, longjmp on error.
9057 *
9058 * @returns The byte.
9059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9060 * @param iSegReg The index of the segment register to use for
9061 * this access. The base and limits are checked.
9062 * @param GCPtrMem The address of the guest memory.
9063 */
9064DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9065{
9066 /* The lazy approach for now... */
9067 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9068 uint8_t const bRet = *pu8Src;
9069 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9070 return bRet;
9071}
9072#endif /* IEM_WITH_SETJMP */
9073
9074
9075/**
9076 * Fetches a data word.
9077 *
9078 * @returns Strict VBox status code.
9079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9080 * @param pu16Dst Where to return the word.
9081 * @param iSegReg The index of the segment register to use for
9082 * this access. The base and limits are checked.
9083 * @param GCPtrMem The address of the guest memory.
9084 */
9085IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9086{
9087 /* The lazy approach for now... */
9088 uint16_t const *pu16Src;
9089 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9090 if (rc == VINF_SUCCESS)
9091 {
9092 *pu16Dst = *pu16Src;
9093 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9094 }
9095 return rc;
9096}
9097
9098
9099#ifdef IEM_WITH_SETJMP
9100/**
9101 * Fetches a data word, longjmp on error.
9102 *
9103 * @returns The word.
9104 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9105 * @param iSegReg The index of the segment register to use for
9106 * this access. The base and limits are checked.
9107 * @param GCPtrMem The address of the guest memory.
9108 */
9109DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9110{
9111 /* The lazy approach for now... */
9112 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9113 uint16_t const u16Ret = *pu16Src;
9114 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9115 return u16Ret;
9116}
9117#endif
9118
9119
9120/**
9121 * Fetches a data dword.
9122 *
9123 * @returns Strict VBox status code.
9124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9125 * @param pu32Dst Where to return the dword.
9126 * @param iSegReg The index of the segment register to use for
9127 * this access. The base and limits are checked.
9128 * @param GCPtrMem The address of the guest memory.
9129 */
9130IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9131{
9132 /* The lazy approach for now... */
9133 uint32_t const *pu32Src;
9134 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9135 if (rc == VINF_SUCCESS)
9136 {
9137 *pu32Dst = *pu32Src;
9138 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9139 }
9140 return rc;
9141}
9142
9143
9144#ifdef IEM_WITH_SETJMP
9145
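/**
 * Applies segmentation to a read access, raising exceptions via longjmp on
 * failure.
 *
 * @returns The linear (post-segmentation) address of the access.
 * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg  The index of the segment register to use.
 * @param   cbMem    The access size in bytes.
 * @param   GCPtrMem The offset within the segment.
 */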
9146IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9147{
9148 Assert(cbMem >= 1);
9149 Assert(iSegReg < X86_SREG_COUNT);
9150
9151 /*
9152 * 64-bit mode is simpler.
9153 */
9154 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9155 {
9156 if (iSegReg >= X86_SREG_FS)
9157 {
9158 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9159 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9160 GCPtrMem += pSel->u64Base;
9161 }
9162
9163 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9164 return GCPtrMem;
9165 }
9166 /*
9167 * 16-bit and 32-bit segmentation.
9168 */
9169 else
9170 {
9171 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9172 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9173 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9174 == X86DESCATTR_P /* data, expand up */
9175 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9176 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9177 {
9178 /* expand up */
9179 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9180 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9181 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9182 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9183 }
9184 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9185 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9186 {
9187 /* expand down */
9188 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9189 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9190 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9191 && GCPtrLast32 > (uint32_t)GCPtrMem))
9192 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9193 }
9194 else
9195 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9196 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9197 }
9198 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9199}
9200
9201
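/**
 * Applies segmentation to a write access, raising exceptions via longjmp on
 * failure.
 *
 * @returns The linear (post-segmentation) address of the access.
 * @param   pVCpu    The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg  The index of the segment register to use.
 * @param   cbMem    The access size in bytes.
 * @param   GCPtrMem The offset within the segment.
 */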
9202IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9203{
9204 Assert(cbMem >= 1);
9205 Assert(iSegReg < X86_SREG_COUNT);
9206
9207 /*
9208 * 64-bit mode is simpler.
9209 */
9210 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9211 {
9212 if (iSegReg >= X86_SREG_FS)
9213 {
9214 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9215 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9216 GCPtrMem += pSel->u64Base;
9217 }
9218
9219 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9220 return GCPtrMem;
9221 }
9222 /*
9223 * 16-bit and 32-bit segmentation.
9224 */
9225 else
9226 {
9227 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9228 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9229 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9230 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9231 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9232 {
9233 /* expand up */
9234 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem - 1;
9235 if (RT_LIKELY( GCPtrLast32 <= pSel->u32Limit
9236 && GCPtrLast32 >= (uint32_t)GCPtrMem))
9237 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9238 }
9239 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9240 {
9241 /* expand down */
9242 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9243 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9244 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9245 && GCPtrLast32 > (uint32_t)GCPtrMem))
9246 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9247 }
9248 else
9249 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9250 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9251 }
9252 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9253}
9254
9255
9256/**
9257 * Fetches a data dword, longjmp on error, fallback/safe version.
9258 *
9259 * @returns The dword.
9260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9261 * @param iSegReg The index of the segment register to use for
9262 * this access. The base and limits are checked.
9263 * @param GCPtrMem The address of the guest memory.
9264 */
9265IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9266{
9267 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9268 uint32_t const u32Ret = *pu32Src;
9269 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9270 return u32Ret;
9271}
9272
9273
9274/**
9275 * Fetches a data dword, longjmp on error.
9276 *
9277 * @returns The dword.
9278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9279 * @param iSegReg The index of the segment register to use for
9280 * this access. The base and limits are checked.
9281 * @param GCPtrMem The address of the guest memory.
9282 */
9283DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9284{
9285# ifdef IEM_WITH_DATA_TLB
9286 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9287 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9288 {
9289 /// @todo more later.
9290 }
9291
9292 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9293# else
9294 /* The lazy approach. */
9295 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9296 uint32_t const u32Ret = *pu32Src;
9297 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9298 return u32Ret;
9299# endif
9300}
9301#endif
9302
9303
9304#ifdef SOME_UNUSED_FUNCTION
9305/**
9306 * Fetches a data dword and sign extends it to a qword.
9307 *
9308 * @returns Strict VBox status code.
9309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9310 * @param pu64Dst Where to return the sign extended value.
9311 * @param iSegReg The index of the segment register to use for
9312 * this access. The base and limits are checked.
9313 * @param GCPtrMem The address of the guest memory.
9314 */
9315IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9316{
9317 /* The lazy approach for now... */
9318 int32_t const *pi32Src;
9319 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9320 if (rc == VINF_SUCCESS)
9321 {
9322 *pu64Dst = *pi32Src;
9323 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9324 }
9325#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9326 else
9327 *pu64Dst = 0;
9328#endif
9329 return rc;
9330}
9331#endif
9332
9333
9334/**
9335 * Fetches a data qword.
9336 *
9337 * @returns Strict VBox status code.
9338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9339 * @param pu64Dst Where to return the qword.
9340 * @param iSegReg The index of the segment register to use for
9341 * this access. The base and limits are checked.
9342 * @param GCPtrMem The address of the guest memory.
9343 */
9344IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9345{
9346 /* The lazy approach for now... */
9347 uint64_t const *pu64Src;
9348 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9349 if (rc == VINF_SUCCESS)
9350 {
9351 *pu64Dst = *pu64Src;
9352 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9353 }
9354 return rc;
9355}
9356
9357
9358#ifdef IEM_WITH_SETJMP
9359/**
9360 * Fetches a data qword, longjmp on error.
9361 *
9362 * @returns The qword.
9363 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9364 * @param iSegReg The index of the segment register to use for
9365 * this access. The base and limits are checked.
9366 * @param GCPtrMem The address of the guest memory.
9367 */
9368DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9369{
9370 /* The lazy approach for now... */
9371 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9372 uint64_t const u64Ret = *pu64Src;
9373 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9374 return u64Ret;
9375}
9376#endif
9377
9378
9379/**
9380 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9381 *
9382 * @returns Strict VBox status code.
9383 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9384 * @param pu64Dst Where to return the qword.
9385 * @param iSegReg The index of the segment register to use for
9386 * this access. The base and limits are checked.
9387 * @param GCPtrMem The address of the guest memory.
9388 */
9389IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9390{
9391 /* The lazy approach for now... */
9392 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9393 if (RT_UNLIKELY(GCPtrMem & 15))
9394 return iemRaiseGeneralProtectionFault0(pVCpu);
9395
9396 uint64_t const *pu64Src;
9397 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9398 if (rc == VINF_SUCCESS)
9399 {
9400 *pu64Dst = *pu64Src;
9401 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9402 }
9403 return rc;
9404}
9405
9406
9407#ifdef IEM_WITH_SETJMP
9408/**
9409 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9410 *
9411 * @returns The qword.
9412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9413 * @param iSegReg The index of the segment register to use for
9414 * this access. The base and limits are checked.
9415 * @param GCPtrMem The address of the guest memory.
9416 */
9417DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9418{
9419 /* The lazy approach for now... */
9420 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9421 if (RT_LIKELY(!(GCPtrMem & 15)))
9422 {
9423 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9424 uint64_t const u64Ret = *pu64Src;
9425 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9426 return u64Ret;
9427 }
9428
9429 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9430 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9431}
9432#endif
9433
9434
9435/**
9436 * Fetches a data tword.
9437 *
9438 * @returns Strict VBox status code.
9439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9440 * @param pr80Dst Where to return the tword.
9441 * @param iSegReg The index of the segment register to use for
9442 * this access. The base and limits are checked.
9443 * @param GCPtrMem The address of the guest memory.
9444 */
9445IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9446{
9447 /* The lazy approach for now... */
9448 PCRTFLOAT80U pr80Src;
9449 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9450 if (rc == VINF_SUCCESS)
9451 {
9452 *pr80Dst = *pr80Src;
9453 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9454 }
9455 return rc;
9456}
9457
9458
9459#ifdef IEM_WITH_SETJMP
9460/**
9461 * Fetches a data tword, longjmp on error.
9462 *
9463 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9464 * @param pr80Dst Where to return the tword.
9465 * @param iSegReg The index of the segment register to use for
9466 * this access. The base and limits are checked.
9467 * @param GCPtrMem The address of the guest memory.
9468 */
9469DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9470{
9471 /* The lazy approach for now... */
9472 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9473 *pr80Dst = *pr80Src;
9474 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9475}
9476#endif
9477
9478
9479/**
9480 * Fetches a data dqword (double qword), generally SSE related.
9481 *
9482 * @returns Strict VBox status code.
9483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9484 * @param pu128Dst Where to return the dqword.
9485 * @param iSegReg The index of the segment register to use for
9486 * this access. The base and limits are checked.
9487 * @param GCPtrMem The address of the guest memory.
9488 */
9489IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9490{
9491 /* The lazy approach for now... */
9492 PCRTUINT128U pu128Src;
9493 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9494 if (rc == VINF_SUCCESS)
9495 {
9496 pu128Dst->au64[0] = pu128Src->au64[0];
9497 pu128Dst->au64[1] = pu128Src->au64[1];
9498 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9499 }
9500 return rc;
9501}
9502
9503
9504#ifdef IEM_WITH_SETJMP
9505/**
9506 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9507 *
9508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9509 * @param pu128Dst Where to return the dqword.
9510 * @param iSegReg The index of the segment register to use for
9511 * this access. The base and limits are checked.
9512 * @param GCPtrMem The address of the guest memory.
9513 */
9514IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9515{
9516 /* The lazy approach for now... */
9517 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9518 pu128Dst->au64[0] = pu128Src->au64[0];
9519 pu128Dst->au64[1] = pu128Src->au64[1];
9520 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9521}
9522#endif
9523
9524
9525/**
9526 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9527 * related.
9528 *
9529 * Raises \#GP(0) if not aligned.
9530 *
9531 * @returns Strict VBox status code.
9532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9533 * @param pu128Dst Where to return the dqword.
9534 * @param iSegReg The index of the segment register to use for
9535 * this access. The base and limits are checked.
9536 * @param GCPtrMem The address of the guest memory.
9537 */
9538IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9539{
9540 /* The lazy approach for now... */
9541 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9542 if ( (GCPtrMem & 15)
9543 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9544 return iemRaiseGeneralProtectionFault0(pVCpu);
9545
9546 PCRTUINT128U pu128Src;
9547 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9548 if (rc == VINF_SUCCESS)
9549 {
9550 pu128Dst->au64[0] = pu128Src->au64[0];
9551 pu128Dst->au64[1] = pu128Src->au64[1];
9552 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9553 }
9554 return rc;
9555}
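/*
 * Editorial summary of the alignment rule implemented above (derived from
 * the code, not original text): #GP(0) is only raised when the address is
 * misaligned AND MXCSR.MM is clear:
 *      (GCPtrMem & 15) == 0                    -> access proceeds
 *      (GCPtrMem & 15) != 0, MXCSR.MM set      -> access proceeds (check relaxed)
 *      (GCPtrMem & 15) != 0, MXCSR.MM clear    -> iemRaiseGeneralProtectionFault0
 * As the todo notes, whether the check belongs before or after adding the
 * segment base is still an open question.
 */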
9556
9557
9558#ifdef IEM_WITH_SETJMP
9559/**
9560 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9561 * related, longjmp on error.
9562 *
9563 * Raises \#GP(0) if not aligned.
9564 *
9565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9566 * @param pu128Dst Where to return the dqword.
9567 * @param iSegReg The index of the segment register to use for
9568 * this access. The base and limits are checked.
9569 * @param GCPtrMem The address of the guest memory.
9570 */
9571DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9572{
9573 /* The lazy approach for now... */
9574 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9575 if ( (GCPtrMem & 15) == 0
9576 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9577 {
9578 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9579 pu128Dst->au64[0] = pu128Src->au64[0];
9580 pu128Dst->au64[1] = pu128Src->au64[1];
9581 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9582 return;
9583 }
9584
9585 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9586 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9587}
9588#endif
9589
9590
9591/**
9592 * Fetches a data oword (octo word), generally AVX related.
9593 *
9594 * @returns Strict VBox status code.
9595 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9596 * @param pu256Dst Where to return the oword.
9597 * @param iSegReg The index of the segment register to use for
9598 * this access. The base and limits are checked.
9599 * @param GCPtrMem The address of the guest memory.
9600 */
9601IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9602{
9603 /* The lazy approach for now... */
9604 PCRTUINT256U pu256Src;
9605 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9606 if (rc == VINF_SUCCESS)
9607 {
9608 pu256Dst->au64[0] = pu256Src->au64[0];
9609 pu256Dst->au64[1] = pu256Src->au64[1];
9610 pu256Dst->au64[2] = pu256Src->au64[2];
9611 pu256Dst->au64[3] = pu256Src->au64[3];
9612 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9613 }
9614 return rc;
9615}
9616
9617
9618#ifdef IEM_WITH_SETJMP
9619/**
9620 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9621 *
9622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9623 * @param pu256Dst Where to return the oword.
9624 * @param iSegReg The index of the segment register to use for
9625 * this access. The base and limits are checked.
9626 * @param GCPtrMem The address of the guest memory.
9627 */
9628IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9629{
9630 /* The lazy approach for now... */
9631 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9632 pu256Dst->au64[0] = pu256Src->au64[0];
9633 pu256Dst->au64[1] = pu256Src->au64[1];
9634 pu256Dst->au64[2] = pu256Src->au64[2];
9635 pu256Dst->au64[3] = pu256Src->au64[3];
9636 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9637}
9638#endif
9639
9640
9641/**
9642 * Fetches a data oword (octo word) at an aligned address, generally AVX
9643 * related.
9644 *
9645 * Raises \#GP(0) if not aligned.
9646 *
9647 * @returns Strict VBox status code.
9648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9649 * @param pu256Dst Where to return the oword.
9650 * @param iSegReg The index of the segment register to use for
9651 * this access. The base and limits are checked.
9652 * @param GCPtrMem The address of the guest memory.
9653 */
9654IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9655{
9656 /* The lazy approach for now... */
9657 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9658 if (GCPtrMem & 31)
9659 return iemRaiseGeneralProtectionFault0(pVCpu);
9660
9661 PCRTUINT256U pu256Src;
9662 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9663 if (rc == VINF_SUCCESS)
9664 {
9665 pu256Dst->au64[0] = pu256Src->au64[0];
9666 pu256Dst->au64[1] = pu256Src->au64[1];
9667 pu256Dst->au64[2] = pu256Src->au64[2];
9668 pu256Dst->au64[3] = pu256Src->au64[3];
9669 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9670 }
9671 return rc;
9672}
9673
9674
9675#ifdef IEM_WITH_SETJMP
9676/**
9677 * Fetches a data oword (octo word) at an aligned address, generally AVX
9678 * related, longjmp on error.
9679 *
9680 * Raises \#GP(0) if not aligned.
9681 *
9682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9683 * @param pu256Dst Where to return the oword.
9684 * @param iSegReg The index of the segment register to use for
9685 * this access. The base and limits are checked.
9686 * @param GCPtrMem The address of the guest memory.
9687 */
9688DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9689{
9690 /* The lazy approach for now... */
9691 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9692 if ((GCPtrMem & 31) == 0)
9693 {
9694 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9695 pu256Dst->au64[0] = pu256Src->au64[0];
9696 pu256Dst->au64[1] = pu256Src->au64[1];
9697 pu256Dst->au64[2] = pu256Src->au64[2];
9698 pu256Dst->au64[3] = pu256Src->au64[3];
9699 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9700 return;
9701 }
9702
9703 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9704 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9705}
9706#endif
9707
9708
9709
9710/**
9711 * Fetches a descriptor register (lgdt, lidt).
9712 *
9713 * @returns Strict VBox status code.
9714 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9715 * @param pcbLimit Where to return the limit.
9716 * @param pGCPtrBase Where to return the base.
9717 * @param iSegReg The index of the segment register to use for
9718 * this access. The base and limits are checked.
9719 * @param GCPtrMem The address of the guest memory.
9720 * @param enmOpSize The effective operand size.
9721 */
9722IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9723 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9724{
9725 /*
9726 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9727 * little special:
9728 * - The two reads are done separately.
9729 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9730 * - We suspect the 386 to actually commit the limit before the base in
9731 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9732 * don't try to emulate this eccentric behavior, because it's not well
9733 * enough understood and rather hard to trigger.
9734 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9735 */
9736 VBOXSTRICTRC rcStrict;
9737 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9738 {
9739 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9740 if (rcStrict == VINF_SUCCESS)
9741 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9742 }
9743 else
9744 {
9745 uint32_t uTmp = 0; /* (silences a Visual C++ may-be-used-uninitialized warning) */
9746 if (enmOpSize == IEMMODE_32BIT)
9747 {
9748 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9749 {
9750 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9751 if (rcStrict == VINF_SUCCESS)
9752 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9753 }
9754 else
9755 {
9756 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9757 if (rcStrict == VINF_SUCCESS)
9758 {
9759 *pcbLimit = (uint16_t)uTmp;
9760 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9761 }
9762 }
9763 if (rcStrict == VINF_SUCCESS)
9764 *pGCPtrBase = uTmp;
9765 }
9766 else
9767 {
9768 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9769 if (rcStrict == VINF_SUCCESS)
9770 {
9771 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9772 if (rcStrict == VINF_SUCCESS)
9773 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9774 }
9775 }
9776 }
9777 return rcStrict;
9778}
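/*
 * Illustrative sketch (not from the original source): a typical LIDT/LGDT
 * style caller fetches the limit/base pair first and only commits it to the
 * guest IDTR/GDTR afterwards.  The iEffSeg/GCPtrEffSrc/enmEffOpSize names
 * below are placeholders for whatever the decoder handed the instruction
 * implementation:
 *
 *      uint16_t     cbLimit;
 *      RTGCPTR      GCPtrBase;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase,
 *                                                  iEffSeg, GCPtrEffSrc, enmEffOpSize);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ...now hand cbLimit/GCPtrBase on to the descriptor table register...
 */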
9779
9780
9781
9782/**
9783 * Stores a data byte.
9784 *
9785 * @returns Strict VBox status code.
9786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9787 * @param iSegReg The index of the segment register to use for
9788 * this access. The base and limits are checked.
9789 * @param GCPtrMem The address of the guest memory.
9790 * @param u8Value The value to store.
9791 */
9792IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9793{
9794 /* The lazy approach for now... */
9795 uint8_t *pu8Dst;
9796 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9797 if (rc == VINF_SUCCESS)
9798 {
9799 *pu8Dst = u8Value;
9800 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9801 }
9802 return rc;
9803}
9804
9805
9806#ifdef IEM_WITH_SETJMP
9807/**
9808 * Stores a data byte, longjmp on error.
9809 *
9810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9811 * @param iSegReg The index of the segment register to use for
9812 * this access. The base and limits are checked.
9813 * @param GCPtrMem The address of the guest memory.
9814 * @param u8Value The value to store.
9815 */
9816IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9817{
9818 /* The lazy approach for now... */
9819 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9820 *pu8Dst = u8Value;
9821 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9822}
9823#endif
9824
9825
9826/**
9827 * Stores a data word.
9828 *
9829 * @returns Strict VBox status code.
9830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9831 * @param iSegReg The index of the segment register to use for
9832 * this access. The base and limits are checked.
9833 * @param GCPtrMem The address of the guest memory.
9834 * @param u16Value The value to store.
9835 */
9836IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9837{
9838 /* The lazy approach for now... */
9839 uint16_t *pu16Dst;
9840 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9841 if (rc == VINF_SUCCESS)
9842 {
9843 *pu16Dst = u16Value;
9844 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9845 }
9846 return rc;
9847}
9848
9849
9850#ifdef IEM_WITH_SETJMP
9851/**
9852 * Stores a data word, longjmp on error.
9853 *
9854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9855 * @param iSegReg The index of the segment register to use for
9856 * this access. The base and limits are checked.
9857 * @param GCPtrMem The address of the guest memory.
9858 * @param u16Value The value to store.
9859 */
9860IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9861{
9862 /* The lazy approach for now... */
9863 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9864 *pu16Dst = u16Value;
9865 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9866}
9867#endif
9868
9869
9870/**
9871 * Stores a data dword.
9872 *
9873 * @returns Strict VBox status code.
9874 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9875 * @param iSegReg The index of the segment register to use for
9876 * this access. The base and limits are checked.
9877 * @param GCPtrMem The address of the guest memory.
9878 * @param u32Value The value to store.
9879 */
9880IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9881{
9882 /* The lazy approach for now... */
9883 uint32_t *pu32Dst;
9884 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9885 if (rc == VINF_SUCCESS)
9886 {
9887 *pu32Dst = u32Value;
9888 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9889 }
9890 return rc;
9891}
9892
9893
9894#ifdef IEM_WITH_SETJMP
9895/**
9896 * Stores a data dword, longjmp on error.
9897 *
9898 * @returns Strict VBox status code.
9899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9900 * @param iSegReg The index of the segment register to use for
9901 * this access. The base and limits are checked.
9902 * @param GCPtrMem The address of the guest memory.
9903 * @param u32Value The value to store.
9904 */
9905IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9906{
9907 /* The lazy approach for now... */
9908 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9909 *pu32Dst = u32Value;
9910 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9911}
9912#endif
9913
9914
9915/**
9916 * Stores a data qword.
9917 *
9918 * @returns Strict VBox status code.
9919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9920 * @param iSegReg The index of the segment register to use for
9921 * this access. The base and limits are checked.
9922 * @param GCPtrMem The address of the guest memory.
9923 * @param u64Value The value to store.
9924 */
9925IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9926{
9927 /* The lazy approach for now... */
9928 uint64_t *pu64Dst;
9929 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9930 if (rc == VINF_SUCCESS)
9931 {
9932 *pu64Dst = u64Value;
9933 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9934 }
9935 return rc;
9936}
9937
9938
9939#ifdef IEM_WITH_SETJMP
9940/**
9941 * Stores a data qword, longjmp on error.
9942 *
9943 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9944 * @param iSegReg The index of the segment register to use for
9945 * this access. The base and limits are checked.
9946 * @param GCPtrMem The address of the guest memory.
9947 * @param u64Value The value to store.
9948 */
9949IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9950{
9951 /* The lazy approach for now... */
9952 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9953 *pu64Dst = u64Value;
9954 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9955}
9956#endif
9957
9958
9959/**
9960 * Stores a data dqword.
9961 *
9962 * @returns Strict VBox status code.
9963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9964 * @param iSegReg The index of the segment register to use for
9965 * this access. The base and limits are checked.
9966 * @param GCPtrMem The address of the guest memory.
9967 * @param u128Value The value to store.
9968 */
9969IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9970{
9971 /* The lazy approach for now... */
9972 PRTUINT128U pu128Dst;
9973 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9974 if (rc == VINF_SUCCESS)
9975 {
9976 pu128Dst->au64[0] = u128Value.au64[0];
9977 pu128Dst->au64[1] = u128Value.au64[1];
9978 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9979 }
9980 return rc;
9981}
9982
9983
9984#ifdef IEM_WITH_SETJMP
9985/**
9986 * Stores a data dqword, longjmp on error.
9987 *
9988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9989 * @param iSegReg The index of the segment register to use for
9990 * this access. The base and limits are checked.
9991 * @param GCPtrMem The address of the guest memory.
9992 * @param u128Value The value to store.
9993 */
9994IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9995{
9996 /* The lazy approach for now... */
9997 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9998 pu128Dst->au64[0] = u128Value.au64[0];
9999 pu128Dst->au64[1] = u128Value.au64[1];
10000 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10001}
10002#endif
10003
10004
10005/**
10006 * Stores a data dqword, SSE aligned.
10007 *
10008 * @returns Strict VBox status code.
10009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10010 * @param iSegReg The index of the segment register to use for
10011 * this access. The base and limits are checked.
10012 * @param GCPtrMem The address of the guest memory.
10013 * @param u128Value The value to store.
10014 */
10015IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10016{
10017 /* The lazy approach for now... */
10018 if ( (GCPtrMem & 15)
10019 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10020 return iemRaiseGeneralProtectionFault0(pVCpu);
10021
10022 PRTUINT128U pu128Dst;
10023 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10024 if (rc == VINF_SUCCESS)
10025 {
10026 pu128Dst->au64[0] = u128Value.au64[0];
10027 pu128Dst->au64[1] = u128Value.au64[1];
10028 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10029 }
10030 return rc;
10031}
10032
10033
10034#ifdef IEM_WITH_SETJMP
10035/**
10036 * Stores a data dqword, SSE aligned, longjmp on error.
10037 *
10038 * @returns Strict VBox status code.
10039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10040 * @param iSegReg The index of the segment register to use for
10041 * this access. The base and limits are checked.
10042 * @param GCPtrMem The address of the guest memory.
10043 * @param u128Value The value to store.
10044 */
10045DECL_NO_INLINE(IEM_STATIC, void)
10046iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10047{
10048 /* The lazy approach for now... */
10049 if ( (GCPtrMem & 15) == 0
10050 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10051 {
10052 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10053 pu128Dst->au64[0] = u128Value.au64[0];
10054 pu128Dst->au64[1] = u128Value.au64[1];
10055 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10056 return;
10057 }
10058
10059 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10060 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10061}
10062#endif
10063
10064
10065/**
10066 * Stores a data oword (octo word).
10067 *
10068 * @returns Strict VBox status code.
10069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10070 * @param iSegReg The index of the segment register to use for
10071 * this access. The base and limits are checked.
10072 * @param GCPtrMem The address of the guest memory.
10073 * @param pu256Value Pointer to the value to store.
10074 */
10075IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10076{
10077 /* The lazy approach for now... */
10078 PRTUINT256U pu256Dst;
10079 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10080 if (rc == VINF_SUCCESS)
10081 {
10082 pu256Dst->au64[0] = pu256Value->au64[0];
10083 pu256Dst->au64[1] = pu256Value->au64[1];
10084 pu256Dst->au64[2] = pu256Value->au64[2];
10085 pu256Dst->au64[3] = pu256Value->au64[3];
10086 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10087 }
10088 return rc;
10089}
10090
10091
10092#ifdef IEM_WITH_SETJMP
10093/**
10094 * Stores a data oword (octo word), longjmp on error.
10095 *
10096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10097 * @param iSegReg The index of the segment register to use for
10098 * this access. The base and limits are checked.
10099 * @param GCPtrMem The address of the guest memory.
10100 * @param pu256Value Pointer to the value to store.
10101 */
10102IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10103{
10104 /* The lazy approach for now... */
10105 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10106 pu256Dst->au64[0] = pu256Value->au64[0];
10107 pu256Dst->au64[1] = pu256Value->au64[1];
10108 pu256Dst->au64[2] = pu256Value->au64[2];
10109 pu256Dst->au64[3] = pu256Value->au64[3];
10110 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10111}
10112#endif
10113
10114
10115/**
10116 * Stores a data oword (octo word), AVX aligned.
10117 *
10118 * @returns Strict VBox status code.
10119 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10120 * @param iSegReg The index of the segment register to use for
10121 * this access. The base and limits are checked.
10122 * @param GCPtrMem The address of the guest memory.
10123 * @param pu256Value Pointer to the value to store.
10124 */
10125IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10126{
10127 /* The lazy approach for now... */
10128 if (GCPtrMem & 31)
10129 return iemRaiseGeneralProtectionFault0(pVCpu);
10130
10131 PRTUINT256U pu256Dst;
10132 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10133 if (rc == VINF_SUCCESS)
10134 {
10135 pu256Dst->au64[0] = pu256Value->au64[0];
10136 pu256Dst->au64[1] = pu256Value->au64[1];
10137 pu256Dst->au64[2] = pu256Value->au64[2];
10138 pu256Dst->au64[3] = pu256Value->au64[3];
10139 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10140 }
10141 return rc;
10142}
10143
10144
10145#ifdef IEM_WITH_SETJMP
10146/**
10147 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10148 *
10149 * @returns Strict VBox status code.
10150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10151 * @param iSegReg The index of the segment register to use for
10152 * this access. The base and limits are checked.
10153 * @param GCPtrMem The address of the guest memory.
10154 * @param pu256Value Pointer to the value to store.
10155 */
10156DECL_NO_INLINE(IEM_STATIC, void)
10157iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10158{
10159 /* The lazy approach for now... */
10160 if ((GCPtrMem & 31) == 0)
10161 {
10162 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10163 pu256Dst->au64[0] = pu256Value->au64[0];
10164 pu256Dst->au64[1] = pu256Value->au64[1];
10165 pu256Dst->au64[2] = pu256Value->au64[2];
10166 pu256Dst->au64[3] = pu256Value->au64[3];
10167 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10168 return;
10169 }
10170
10171 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10172 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10173}
10174#endif
10175
10176
10177/**
10178 * Stores a descriptor register (sgdt, sidt).
10179 *
10180 * @returns Strict VBox status code.
10181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10182 * @param cbLimit The limit.
10183 * @param GCPtrBase The base address.
10184 * @param iSegReg The index of the segment register to use for
10185 * this access. The base and limits are checked.
10186 * @param GCPtrMem The address of the guest memory.
10187 */
10188IEM_STATIC VBOXSTRICTRC
10189iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10190{
10191 /*
10192 * The SIDT and SGDT instructions actually store the data using two
10193 * independent writes. The instructions do not respond to opsize prefixes.
10194 */
10195 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10196 if (rcStrict == VINF_SUCCESS)
10197 {
10198 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10199 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10200 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10201 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10202 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10203 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10204 else
10205 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10206 }
10207 return rcStrict;
10208}
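/*
 * Editorial layout note (derived from the code above, not original text) on
 * what iemMemStoreDataXdtr writes:
 *      GCPtrMem + 0:  the 16-bit limit, always.
 *      GCPtrMem + 2:  the base address:
 *                     - 16-bit mode on a 286-or-older target CPU: dword with bits 31:24 forced to 0xff.
 *                     - 16-bit or 32-bit mode otherwise:          dword base.
 *                     - 64-bit mode:                              qword base.
 */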
10209
10210
10211/**
10212 * Pushes a word onto the stack.
10213 *
10214 * @returns Strict VBox status code.
10215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10216 * @param u16Value The value to push.
10217 */
10218IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10219{
10220 /* Decrement the stack pointer. */
10221 uint64_t uNewRsp;
10222 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10223
10224 /* Write the word the lazy way. */
10225 uint16_t *pu16Dst;
10226 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10227 if (rc == VINF_SUCCESS)
10228 {
10229 *pu16Dst = u16Value;
10230 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10231 }
10232
10233 /* Commit the new RSP value unless an access handler made trouble. */
10234 if (rc == VINF_SUCCESS)
10235 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10236
10237 return rc;
10238}
10239
10240
10241/**
10242 * Pushes a dword onto the stack.
10243 *
10244 * @returns Strict VBox status code.
10245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10246 * @param u32Value The value to push.
10247 */
10248IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10249{
10250 /* Decrement the stack pointer. */
10251 uint64_t uNewRsp;
10252 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10253
10254 /* Write the dword the lazy way. */
10255 uint32_t *pu32Dst;
10256 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10257 if (rc == VINF_SUCCESS)
10258 {
10259 *pu32Dst = u32Value;
10260 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10261 }
10262
10263 /* Commit the new RSP value unless an access handler made trouble. */
10264 if (rc == VINF_SUCCESS)
10265 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10266
10267 return rc;
10268}
10269
10270
10271/**
10272 * Pushes a dword segment register value onto the stack.
10273 *
10274 * @returns Strict VBox status code.
10275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10276 * @param u32Value The value to push.
10277 */
10278IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10279{
10280 /* Decrement the stack pointer. */
10281 uint64_t uNewRsp;
10282 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10283
10284 /* The Intel docs talk about zero extending the selector register
10285 value. The actual Intel CPU tested here might be zero extending the value,
10286 but it still only writes the lower word... */
10287 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10288 * happens when crossing a page boundary: is the high word checked
10289 * for write accessibility or not? Probably it is. What about segment limits?
10290 * It appears this behavior is also shared with trap error codes.
10291 *
10292 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.
10293 * Check ancient hardware to find out when it actually did change. */
10294 uint16_t *pu16Dst;
10295 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10296 if (rc == VINF_SUCCESS)
10297 {
10298 *pu16Dst = (uint16_t)u32Value;
10299 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10300 }
10301
10302 /* Commit the new RSP value unless an access handler made trouble. */
10303 if (rc == VINF_SUCCESS)
10304 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10305
10306 return rc;
10307}
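/*
 * Editorial note (derived from the code above): pushing a segment register
 * with a 32-bit operand size moves RSP down by 4, yet only the low word of
 * the 4-byte stack slot is written; the upper two bytes keep whatever was on
 * the stack before.  The slot is still mapped as a full 4-byte read-write
 * access, so accessibility of the whole slot is checked.
 */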
10308
10309
10310/**
10311 * Pushes a qword onto the stack.
10312 *
10313 * @returns Strict VBox status code.
10314 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10315 * @param u64Value The value to push.
10316 */
10317IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10318{
10319 /* Decrement the stack pointer. */
10320 uint64_t uNewRsp;
10321 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10322
10323 /* Write the qword the lazy way. */
10324 uint64_t *pu64Dst;
10325 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10326 if (rc == VINF_SUCCESS)
10327 {
10328 *pu64Dst = u64Value;
10329 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10330 }
10331
10332 /* Commit the new RSP value unless an access handler made trouble. */
10333 if (rc == VINF_SUCCESS)
10334 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10335
10336 return rc;
10337}
10338
10339
10340/**
10341 * Pops a word from the stack.
10342 *
10343 * @returns Strict VBox status code.
10344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10345 * @param pu16Value Where to store the popped value.
10346 */
10347IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10348{
10349 /* Increment the stack pointer. */
10350 uint64_t uNewRsp;
10351 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10352
10353 /* Read the word the lazy way. */
10354 uint16_t const *pu16Src;
10355 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10356 if (rc == VINF_SUCCESS)
10357 {
10358 *pu16Value = *pu16Src;
10359 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10360
10361 /* Commit the new RSP value. */
10362 if (rc == VINF_SUCCESS)
10363 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10364 }
10365
10366 return rc;
10367}
10368
10369
10370/**
10371 * Pops a dword from the stack.
10372 *
10373 * @returns Strict VBox status code.
10374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10375 * @param pu32Value Where to store the popped value.
10376 */
10377IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10378{
10379 /* Increment the stack pointer. */
10380 uint64_t uNewRsp;
10381 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10382
10383 /* Read the dword the lazy way. */
10384 uint32_t const *pu32Src;
10385 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10386 if (rc == VINF_SUCCESS)
10387 {
10388 *pu32Value = *pu32Src;
10389 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10390
10391 /* Commit the new RSP value. */
10392 if (rc == VINF_SUCCESS)
10393 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10394 }
10395
10396 return rc;
10397}
10398
10399
10400/**
10401 * Pops a qword from the stack.
10402 *
10403 * @returns Strict VBox status code.
10404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10405 * @param pu64Value Where to store the popped value.
10406 */
10407IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10408{
10409 /* Increment the stack pointer. */
10410 uint64_t uNewRsp;
10411 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10412
10413 /* Read the qword the lazy way. */
10414 uint64_t const *pu64Src;
10415 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10416 if (rc == VINF_SUCCESS)
10417 {
10418 *pu64Value = *pu64Src;
10419 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10420
10421 /* Commit the new RSP value. */
10422 if (rc == VINF_SUCCESS)
10423 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10424 }
10425
10426 return rc;
10427}
10428
10429
10430/**
10431 * Pushes a word onto the stack, using a temporary stack pointer.
10432 *
10433 * @returns Strict VBox status code.
10434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10435 * @param u16Value The value to push.
10436 * @param pTmpRsp Pointer to the temporary stack pointer.
10437 */
10438IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10439{
10440 /* Decrement the stack pointer. */
10441 RTUINT64U NewRsp = *pTmpRsp;
10442 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10443
10444 /* Write the word the lazy way. */
10445 uint16_t *pu16Dst;
10446 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10447 if (rc == VINF_SUCCESS)
10448 {
10449 *pu16Dst = u16Value;
10450 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10451 }
10452
10453 /* Commit the new RSP value unless an access handler made trouble. */
10454 if (rc == VINF_SUCCESS)
10455 *pTmpRsp = NewRsp;
10456
10457 return rc;
10458}
10459
10460
10461/**
10462 * Pushes a dword onto the stack, using a temporary stack pointer.
10463 *
10464 * @returns Strict VBox status code.
10465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10466 * @param u32Value The value to push.
10467 * @param pTmpRsp Pointer to the temporary stack pointer.
10468 */
10469IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10470{
10471 /* Decrement the stack pointer. */
10472 RTUINT64U NewRsp = *pTmpRsp;
10473 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10474
10475 /* Write the dword the lazy way. */
10476 uint32_t *pu32Dst;
10477 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10478 if (rc == VINF_SUCCESS)
10479 {
10480 *pu32Dst = u32Value;
10481 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10482 }
10483
10484 /* Commit the new RSP value unless we an access handler made trouble. */
10485 if (rc == VINF_SUCCESS)
10486 *pTmpRsp = NewRsp;
10487
10488 return rc;
10489}
10490
10491
10492/**
10493 * Pushes a qword onto the stack, using a temporary stack pointer.
10494 *
10495 * @returns Strict VBox status code.
10496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10497 * @param u64Value The value to push.
10498 * @param pTmpRsp Pointer to the temporary stack pointer.
10499 */
10500IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10501{
10502 /* Decrement the stack pointer. */
10503 RTUINT64U NewRsp = *pTmpRsp;
10504 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10505
10506 /* Write the qword the lazy way. */
10507 uint64_t *pu64Dst;
10508 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10509 if (rc == VINF_SUCCESS)
10510 {
10511 *pu64Dst = u64Value;
10512 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10513 }
10514
10515 /* Commit the new RSP value unless an access handler made trouble. */
10516 if (rc == VINF_SUCCESS)
10517 *pTmpRsp = NewRsp;
10518
10519 return rc;
10520}
10521
10522
10523/**
10524 * Pops a word from the stack, using a temporary stack pointer.
10525 *
10526 * @returns Strict VBox status code.
10527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10528 * @param pu16Value Where to store the popped value.
10529 * @param pTmpRsp Pointer to the temporary stack pointer.
10530 */
10531IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10532{
10533 /* Increment the stack pointer. */
10534 RTUINT64U NewRsp = *pTmpRsp;
10535 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10536
10537 /* Read the word the lazy way. */
10538 uint16_t const *pu16Src;
10539 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10540 if (rc == VINF_SUCCESS)
10541 {
10542 *pu16Value = *pu16Src;
10543 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10544
10545 /* Commit the new RSP value. */
10546 if (rc == VINF_SUCCESS)
10547 *pTmpRsp = NewRsp;
10548 }
10549
10550 return rc;
10551}
10552
10553
10554/**
10555 * Pops a dword from the stack, using a temporary stack pointer.
10556 *
10557 * @returns Strict VBox status code.
10558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10559 * @param pu32Value Where to store the popped value.
10560 * @param pTmpRsp Pointer to the temporary stack pointer.
10561 */
10562IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10563{
10564 /* Increment the stack pointer. */
10565 RTUINT64U NewRsp = *pTmpRsp;
10566 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10567
10568 /* Read the dword the lazy way. */
10569 uint32_t const *pu32Src;
10570 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10571 if (rc == VINF_SUCCESS)
10572 {
10573 *pu32Value = *pu32Src;
10574 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10575
10576 /* Commit the new RSP value. */
10577 if (rc == VINF_SUCCESS)
10578 *pTmpRsp = NewRsp;
10579 }
10580
10581 return rc;
10582}
10583
10584
10585/**
10586 * Pops a qword from the stack, using a temporary stack pointer.
10587 *
10588 * @returns Strict VBox status code.
10589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10590 * @param pu64Value Where to store the popped value.
10591 * @param pTmpRsp Pointer to the temporary stack pointer.
10592 */
10593IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10594{
10595 /* Increment the stack pointer. */
10596 RTUINT64U NewRsp = *pTmpRsp;
10597 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10598
10599 /* Read the qword the lazy way. */
10600 uint64_t const *pu64Src;
10601 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10602 if (rcStrict == VINF_SUCCESS)
10603 {
10604 *pu64Value = *pu64Src;
10605 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10606
10607 /* Commit the new RSP value. */
10608 if (rcStrict == VINF_SUCCESS)
10609 *pTmpRsp = NewRsp;
10610 }
10611
10612 return rcStrict;
10613}
10614
10615
10616/**
10617 * Begin a special stack push (used by interrupts, exceptions and such).
10618 *
10619 * This will raise \#SS or \#PF if appropriate.
10620 *
10621 * @returns Strict VBox status code.
10622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10623 * @param cbMem The number of bytes to push onto the stack.
10624 * @param ppvMem Where to return the pointer to the stack memory.
10625 * As with the other memory functions this could be
10626 * direct access or bounce buffered access, so
10627 * don't commit register changes until the commit call
10628 * succeeds.
10629 * @param puNewRsp Where to return the new RSP value. This must be
10630 * passed unchanged to
10631 * iemMemStackPushCommitSpecial().
10632 */
10633IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10634{
10635 Assert(cbMem < UINT8_MAX);
10636 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10637 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10638}
10639
10640
10641/**
10642 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10643 *
10644 * This will update the rSP.
10645 *
10646 * @returns Strict VBox status code.
10647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10648 * @param pvMem The pointer returned by
10649 * iemMemStackPushBeginSpecial().
10650 * @param uNewRsp The new RSP value returned by
10651 * iemMemStackPushBeginSpecial().
10652 */
10653IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10654{
10655 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10656 if (rcStrict == VINF_SUCCESS)
10657 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10658 return rcStrict;
10659}
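/*
 * Illustrative sketch (not from the original source): the special push pair
 * is used roughly like this when building an exception/interrupt stack frame.
 * The 6-byte frame size is a made-up placeholder:
 *
 *      void        *pvStackFrame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, &pvStackFrame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ...write flags/cs/ip into pvStackFrame...
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pvStackFrame, uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // RSP has now been updated to uNewRsp.
 */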
10660
10661
10662/**
10663 * Begin a special stack pop (used by iret, retf and such).
10664 *
10665 * This will raise \#SS or \#PF if appropriate.
10666 *
10667 * @returns Strict VBox status code.
10668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10669 * @param cbMem The number of bytes to pop from the stack.
10670 * @param ppvMem Where to return the pointer to the stack memory.
10671 * @param puNewRsp Where to return the new RSP value. This must be
10672 * assigned to CPUMCTX::rsp manually some time
10673 * after iemMemStackPopDoneSpecial() has been
10674 * called.
10675 */
10676IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10677{
10678 Assert(cbMem < UINT8_MAX);
10679 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10680 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10681}
10682
10683
10684/**
10685 * Continue a special stack pop (used by iret and retf).
10686 *
10687 * This will raise \#SS or \#PF if appropriate.
10688 *
10689 * @returns Strict VBox status code.
10690 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10691 * @param cbMem The number of bytes to pop from the stack.
10692 * @param ppvMem Where to return the pointer to the stack memory.
10693 * @param puNewRsp Where to return the new RSP value. This must be
10694 * assigned to CPUMCTX::rsp manually some time
10695 * after iemMemStackPopDoneSpecial() has been
10696 * called.
10697 */
10698IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10699{
10700 Assert(cbMem < UINT8_MAX);
10701 RTUINT64U NewRsp;
10702 NewRsp.u = *puNewRsp;
10703 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10704 *puNewRsp = NewRsp.u;
10705 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10706}
10707
10708
10709/**
10710 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10711 * iemMemStackPopContinueSpecial).
10712 *
10713 * The caller will manually commit the rSP.
10714 *
10715 * @returns Strict VBox status code.
10716 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10717 * @param pvMem The pointer returned by
10718 * iemMemStackPopBeginSpecial() or
10719 * iemMemStackPopContinueSpecial().
10720 */
10721IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10722{
10723 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10724}
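/*
 * Illustrative sketch (not from the original source): an iret/retf style
 * caller uses the special pop helpers roughly like this (the 6-byte frame
 * size is a made-up placeholder):
 *
 *      void const  *pvFrame;
 *      uint64_t     uNewRsp;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, &pvFrame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ...read ip/cs/flags out of pvFrame...
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pvFrame);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pVCpu->cpum.GstCtx.rsp = uNewRsp; // committed manually, as the doc comment requires
 */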
10725
10726
10727/**
10728 * Fetches a system table byte.
10729 *
10730 * @returns Strict VBox status code.
10731 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10732 * @param pbDst Where to return the byte.
10733 * @param iSegReg The index of the segment register to use for
10734 * this access. The base and limits are checked.
10735 * @param GCPtrMem The address of the guest memory.
10736 */
10737IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10738{
10739 /* The lazy approach for now... */
10740 uint8_t const *pbSrc;
10741 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10742 if (rc == VINF_SUCCESS)
10743 {
10744 *pbDst = *pbSrc;
10745 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10746 }
10747 return rc;
10748}
10749
10750
10751/**
10752 * Fetches a system table word.
10753 *
10754 * @returns Strict VBox status code.
10755 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10756 * @param pu16Dst Where to return the word.
10757 * @param iSegReg The index of the segment register to use for
10758 * this access. The base and limits are checked.
10759 * @param GCPtrMem The address of the guest memory.
10760 */
10761IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10762{
10763 /* The lazy approach for now... */
10764 uint16_t const *pu16Src;
10765 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10766 if (rc == VINF_SUCCESS)
10767 {
10768 *pu16Dst = *pu16Src;
10769 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10770 }
10771 return rc;
10772}
10773
10774
10775/**
10776 * Fetches a system table dword.
10777 *
10778 * @returns Strict VBox status code.
10779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10780 * @param pu32Dst Where to return the dword.
10781 * @param iSegReg The index of the segment register to use for
10782 * this access. The base and limits are checked.
10783 * @param GCPtrMem The address of the guest memory.
10784 */
10785IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10786{
10787 /* The lazy approach for now... */
10788 uint32_t const *pu32Src;
10789 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10790 if (rc == VINF_SUCCESS)
10791 {
10792 *pu32Dst = *pu32Src;
10793 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10794 }
10795 return rc;
10796}
10797
10798
10799/**
10800 * Fetches a system table qword.
10801 *
10802 * @returns Strict VBox status code.
10803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10804 * @param pu64Dst Where to return the qword.
10805 * @param iSegReg The index of the segment register to use for
10806 * this access. The base and limits are checked.
10807 * @param GCPtrMem The address of the guest memory.
10808 */
10809IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10810{
10811 /* The lazy approach for now... */
10812 uint64_t const *pu64Src;
10813 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10814 if (rc == VINF_SUCCESS)
10815 {
10816 *pu64Dst = *pu64Src;
10817 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10818 }
10819 return rc;
10820}
10821
10822
10823/**
10824 * Fetches a descriptor table entry with caller specified error code.
10825 *
10826 * @returns Strict VBox status code.
10827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10828 * @param pDesc Where to return the descriptor table entry.
10829 * @param uSel The selector which table entry to fetch.
10830 * @param uXcpt The exception to raise on table lookup error.
10831 * @param uErrorCode The error code associated with the exception.
10832 */
10833IEM_STATIC VBOXSTRICTRC
10834iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10835{
10836 AssertPtr(pDesc);
10837 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10838
10839 /** @todo did the 286 require all 8 bytes to be accessible? */
10840 /*
10841 * Get the selector table base and check bounds.
10842 */
10843 RTGCPTR GCPtrBase;
10844 if (uSel & X86_SEL_LDT)
10845 {
10846 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10847 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10848 {
10849 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10850 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10851 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10852 uErrorCode, 0);
10853 }
10854
10855 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10856 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10857 }
10858 else
10859 {
10860 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10861 {
10862 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10863 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10864 uErrorCode, 0);
10865 }
10866 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10867 }
10868
10869 /*
10870 * Read the legacy descriptor and maybe the long mode extensions if
10871 * required.
10872 */
10873 VBOXSTRICTRC rcStrict;
10874 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10875 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10876 else
10877 {
10878 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10879 if (rcStrict == VINF_SUCCESS)
10880 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10881 if (rcStrict == VINF_SUCCESS)
10882 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10883 if (rcStrict == VINF_SUCCESS)
10884 pDesc->Legacy.au16[3] = 0;
10885 else
10886 return rcStrict;
10887 }
10888
10889 if (rcStrict == VINF_SUCCESS)
10890 {
10891 if ( !IEM_IS_LONG_MODE(pVCpu)
10892 || pDesc->Legacy.Gen.u1DescType)
10893 pDesc->Long.au64[1] = 0;
10894 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10895 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10896 else
10897 {
10898 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10899 /** @todo is this the right exception? */
10900 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10901 }
10902 }
10903 return rcStrict;
10904}
10905
10906
10907/**
10908 * Fetches a descriptor table entry.
10909 *
10910 * @returns Strict VBox status code.
10911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10912 * @param pDesc Where to return the descriptor table entry.
10913 * @param uSel The selector which table entry to fetch.
10914 * @param uXcpt The exception to raise on table lookup error.
10915 */
10916IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10917{
10918 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10919}
10920
10921
10922/**
10923 * Fakes a long mode stack selector descriptor for SS = 0.
10924 *
10925 * @param pDescSs Where to return the fake stack descriptor.
10926 * @param uDpl The DPL we want.
10927 */
10928IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10929{
10930 pDescSs->Long.au64[0] = 0;
10931 pDescSs->Long.au64[1] = 0;
10932 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10933 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10934 pDescSs->Long.Gen.u2Dpl = uDpl;
10935 pDescSs->Long.Gen.u1Present = 1;
10936 pDescSs->Long.Gen.u1Long = 1;
10937}
10938
10939
10940/**
10941 * Marks the selector descriptor as accessed (only non-system descriptors).
10942 *
10943 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10944 * will therefore skip the limit checks.
10945 *
10946 * @returns Strict VBox status code.
10947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10948 * @param uSel The selector.
10949 */
10950IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10951{
10952 /*
10953 * Get the selector table base and calculate the entry address.
10954 */
10955 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10956 ? pVCpu->cpum.GstCtx.ldtr.u64Base
10957 : pVCpu->cpum.GstCtx.gdtr.pGdt;
10958 GCPtr += uSel & X86_SEL_MASK;
10959
10960 /*
10961 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10962 * ugly stuff to avoid that. This also ensures the access is atomic and
10963 * more or less removes any question about 8-bit vs 32-bit accesses.
10964 */
10965 VBOXSTRICTRC rcStrict;
10966 uint32_t volatile *pu32;
10967 if ((GCPtr & 3) == 0)
10968 {
10969 /* The normal case: map the 32 bits containing the accessed bit (descriptor bit 40). */
10970 GCPtr += 2 + 2;
10971 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10972 if (rcStrict != VINF_SUCCESS)
10973 return rcStrict;
10974 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by the 8 bits of u8BaseHigh1 in the mapped dword. */
10975 }
10976 else
10977 {
10978 /* The misaligned GDT/LDT case, map the whole thing. */
10979 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10980 if (rcStrict != VINF_SUCCESS)
10981 return rcStrict;
10982 switch ((uintptr_t)pu32 & 3)
10983 {
10984 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10985 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10986 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10987 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10988 }
10989 }
10990
10991 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10992}
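
/*
 * Illustrative sketch (example only, not compiled): the usual pairing of
 * iemMemFetchSelDesc and iemMemMarkSelDescAccessed when loading a code or
 * data selector.  The intermediate presence/DPL/type checks are elided and
 * the variable names are invented for the example.
 */
#if 0
    IEMSELDESC Desc;
    VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* ... presence, DPL and type checks on Desc.Legacy would go here ... */

    /* Set the accessed bit, skipping the limit re-check as documented above. */
    if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
    {
        rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
        Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
    }
#endif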
10993
10994/** @} */
10995
10996
10997/*
10998 * Include the C/C++ implementations of the instructions.
10999 */
11000#include "IEMAllCImpl.cpp.h"
11001
11002
11003
11004/** @name "Microcode" macros.
11005 *
11006 * The idea is that we should be able to use the same code to interpret
11007 * instructions as well as to recompile them. Thus this obfuscation.
11008 *
11009 * @{
11010 */
11011#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11012#define IEM_MC_END() }
11013#define IEM_MC_PAUSE() do {} while (0)
11014#define IEM_MC_CONTINUE() do {} while (0)
11015
11016/** Internal macro. */
11017#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11018 do \
11019 { \
11020 VBOXSTRICTRC rcStrict2 = a_Expr; \
11021 if (rcStrict2 != VINF_SUCCESS) \
11022 return rcStrict2; \
11023 } while (0)
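
/*
 * Illustrative sketch (example only, not compiled): roughly what a decoder
 * body built from these microcode macros looks like, loosely modelled on a
 * 16-bit register push.  When interpreting, the block expands to plain C
 * operating on pVCpu; a recompiler could remap the same macros to code
 * generation instead.
 */
#if 0
    IEM_MC_BEGIN(0, 1);
    IEM_MC_LOCAL(uint16_t, u16Value);
    IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
    IEM_MC_PUSH_U16(u16Value);
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
    return VINF_SUCCESS;
#endif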
11024
11025
11026#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11027#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11028#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11029#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11030#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11031#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11032#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11033#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11034#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11035 do { \
11036 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11037 return iemRaiseDeviceNotAvailable(pVCpu); \
11038 } while (0)
11039#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11040 do { \
11041 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11042 return iemRaiseDeviceNotAvailable(pVCpu); \
11043 } while (0)
11044#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11045 do { \
11046 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11047 return iemRaiseMathFault(pVCpu); \
11048 } while (0)
11049#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11050 do { \
11051 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11052 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11053 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11054 return iemRaiseUndefinedOpcode(pVCpu); \
11055 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11056 return iemRaiseDeviceNotAvailable(pVCpu); \
11057 } while (0)
11058#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11059 do { \
11060 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11061 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11062 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11063 return iemRaiseUndefinedOpcode(pVCpu); \
11064 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11065 return iemRaiseDeviceNotAvailable(pVCpu); \
11066 } while (0)
11067#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11068 do { \
11069 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11070 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11071 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11072 return iemRaiseUndefinedOpcode(pVCpu); \
11073 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11074 return iemRaiseDeviceNotAvailable(pVCpu); \
11075 } while (0)
11076#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11077 do { \
11078 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11079 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11080 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11081 return iemRaiseUndefinedOpcode(pVCpu); \
11082 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11083 return iemRaiseDeviceNotAvailable(pVCpu); \
11084 } while (0)
11085#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11086 do { \
11087 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11088 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11089 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11090 return iemRaiseUndefinedOpcode(pVCpu); \
11091 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11092 return iemRaiseDeviceNotAvailable(pVCpu); \
11093 } while (0)
11094#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11095 do { \
11096 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11097 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11098 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11099 return iemRaiseUndefinedOpcode(pVCpu); \
11100 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11101 return iemRaiseDeviceNotAvailable(pVCpu); \
11102 } while (0)
11103#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11104 do { \
11105 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11106 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11107 return iemRaiseUndefinedOpcode(pVCpu); \
11108 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11109 return iemRaiseDeviceNotAvailable(pVCpu); \
11110 } while (0)
11111#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11112 do { \
11113 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11114 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11115 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11116 return iemRaiseUndefinedOpcode(pVCpu); \
11117 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11118 return iemRaiseDeviceNotAvailable(pVCpu); \
11119 } while (0)
11120#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11121 do { \
11122 if (pVCpu->iem.s.uCpl != 0) \
11123 return iemRaiseGeneralProtectionFault0(pVCpu); \
11124 } while (0)
11125#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11126 do { \
11127 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11128 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11129 } while (0)
11130#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11131 do { \
11132 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11133 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11134 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11135 return iemRaiseUndefinedOpcode(pVCpu); \
11136 } while (0)
11137#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11138 do { \
11139 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11140 return iemRaiseGeneralProtectionFault0(pVCpu); \
11141 } while (0)
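
/*
 * Illustrative sketch (example only, not compiled): the IEM_MC_MAYBE_RAISE_XXX
 * checks above are placed at the start of a microcode block, before any guest
 * state is modified, e.g. for an SSE2 instruction:
 */
#if 0
    IEM_MC_BEGIN(0, 1);
    IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
    IEM_MC_LOCAL(RTGCPTR, GCPtrEffSrc);
    /* ... effective address calculation and the actual operation ... */
    IEM_MC_ADVANCE_RIP();
    IEM_MC_END();
#endif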
11142
11143
11144#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11145#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11146#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11147#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11148#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11149#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11150#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11151 uint32_t a_Name; \
11152 uint32_t *a_pName = &a_Name
11153#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11154 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11155
11156#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11157#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11158
11159#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11160#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11161#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11162#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11163#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11164#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11165#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11166#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11167#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11168#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11169#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11170#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11171#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11172#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11173#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11174#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11175#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11176#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11177 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11178 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11179 } while (0)
11180#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11181 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11182 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11183 } while (0)
11184#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11185 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11186 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11187 } while (0)
11188/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11189#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11190 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11191 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11192 } while (0)
11193#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11194 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11195 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11196 } while (0)
11197/** @note Not for IOPL or IF testing or modification. */
11198#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11199#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11200#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11201#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11202
11203#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11204#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11205#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11206#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11207#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11208#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11209#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11210#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11211#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11212#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11213/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11214#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11215 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11216 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11217 } while (0)
11218#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11219 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11220 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11221 } while (0)
11222#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11223 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11224
11225
11226#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11227#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11228/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11229 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11230#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11231#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11232/** @note Not for IOPL or IF testing or modification. */
11233#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11234
11235#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11236#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11237#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11238 do { \
11239 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11240 *pu32Reg += (a_u32Value); \
11241 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11242 } while (0)
11243#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11244
11245#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11246#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11247#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11248 do { \
11249 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11250 *pu32Reg -= (a_u32Value); \
11251 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11252 } while (0)
11253#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11254#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11255
11256#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11257#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11258#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11259#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11260#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11261#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11262#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11263
11264#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11265#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11266#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11267#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11268
11269#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11270#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11271#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11272
11273#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11274#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11275#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11276
11277#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11278#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11279#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11280
11281#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11282#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11283#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11284
11285#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11286
11287#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11288
11289#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11290#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11291#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11292 do { \
11293 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11294 *pu32Reg &= (a_u32Value); \
11295 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11296 } while (0)
11297#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11298
11299#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11300#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11301#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11302 do { \
11303 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11304 *pu32Reg |= (a_u32Value); \
11305 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11306 } while (0)
11307#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11308
11309
11310/** @note Not for IOPL or IF modification. */
11311#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11312/** @note Not for IOPL or IF modification. */
11313#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11314/** @note Not for IOPL or IF modification. */
11315#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11316
11317#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11318
11319/** Switches the FPU state to MMX mode (FSW.TOS=0, all register tags valid) if necessary. */
11320#define IEM_MC_FPU_TO_MMX_MODE() do { \
11321 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11322 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11323 } while (0)
11324
11325/** Switches the FPU state from MMX mode (all register tags empty). */
11326#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11327 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11328 } while (0)
11329
11330#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11331 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11332#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11333 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11334#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11335 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11336 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11337 } while (0)
11338#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11339 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11340 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11341 } while (0)
11342#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11343 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11344#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11345 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11346#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11347 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11348
11349#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11350 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11351 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11352 } while (0)
11353#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11354 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11355#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11356 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11357#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11358 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11359#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11360 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11361 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11362 } while (0)
11363#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11364 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11365#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11366 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11367 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11368 } while (0)
11369#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11370 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11371#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11372 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11373 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11374 } while (0)
11375#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11376 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11377#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11378 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11379#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11380 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11381#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11382 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11383#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11384 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11385 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11386 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11387 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11388 } while (0)
11389
11390#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11391 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11392 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11393 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11394 } while (0)
11395#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11396 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11397 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11398 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11399 } while (0)
11400#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11401 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11402 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11403 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11404 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11405 } while (0)
11406#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11407 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11408 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11409 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11410 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11411 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11412 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11413 } while (0)
11414
11415#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11416#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11417 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11418 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11419 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11420 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11421 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11422 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11423 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11424 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11425 } while (0)
11426#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11427 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11428 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11429 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11430 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11431 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11432 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11433 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11434 } while (0)
11435#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11436 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11437 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11438 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11439 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11440 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11441 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11442 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11443 } while (0)
11444#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11445 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11446 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11447 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11448 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11449 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11450 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11451 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11452 } while (0)
11453
11454#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11455 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11456#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11457 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11458#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11459 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11460#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11461 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11462 uintptr_t const iYRegTmp = (a_iYReg); \
11463 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11464 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11465 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11466 } while (0)
11467
11468#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11469 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11470 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11471 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11472 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11473 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11474 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11475 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11476 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11477 } while (0)
11478#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11479 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11480 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11481 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11482 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11483 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11484 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11485 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11486 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11487 } while (0)
11488#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11489 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11490 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11491 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11492 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11493 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11494 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11495 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11496 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11497 } while (0)
11498
11499#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11500 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11501 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11502 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11503 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11504 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11505 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11506 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11507 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11508 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11509 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11510 } while (0)
11511#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11512 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11513 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11514 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11515 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11516 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11517 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11518 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11519 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11520 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11521 } while (0)
11522#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11523 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11524 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11525 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11526 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11527 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11528 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11529 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11530 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11531 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11532 } while (0)
11533#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11534 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11535 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11536 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11537 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11538 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11539 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11540 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11541 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11542 } while (0)
11543
11544#ifndef IEM_WITH_SETJMP
11545# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11546 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11547# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11548 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11549# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11550 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11551#else
11552# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11553 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11554# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11555 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11556# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11557 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11558#endif
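
/*
 * Illustrative sketch (example only, not compiled): call sites spell memory
 * fetches the same way in both build modes.  Without IEM_WITH_SETJMP the
 * macro returns the strict status code on failure; with it the fetch helper
 * longjmps out instead, so no explicit status propagation is needed.  The
 * effective segment/address names are assumptions for the example.
 */
#if 0
    uint8_t u8Value;
    IEM_MC_FETCH_MEM_U8(u8Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
#endif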
11559
11560#ifndef IEM_WITH_SETJMP
11561# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11562 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11563# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11564 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11565# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11566 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11567#else
11568# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11569 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11570# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11571 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11572# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11573 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11574#endif
11575
11576#ifndef IEM_WITH_SETJMP
11577# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11578 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11579# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11580 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11581# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11582 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11583#else
11584# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11585 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11586# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11587 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11588# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11589 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11590#endif
11591
11592#ifdef SOME_UNUSED_FUNCTION
11593# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11594 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11595#endif
11596
11597#ifndef IEM_WITH_SETJMP
11598# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11599 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11600# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11601 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11602# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11603 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11604# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11605 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11606#else
11607# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11608 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11609# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11610 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11611# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11612 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11613# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11614 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11615#endif
11616
11617#ifndef IEM_WITH_SETJMP
11618# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11619 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11620# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11621 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11622# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11623 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11624#else
11625# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11626 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11627# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11628 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11629# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11630 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11631#endif
11632
11633#ifndef IEM_WITH_SETJMP
11634# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11635 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11636# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11637 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11638#else
11639# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11640 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11641# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11642 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11643#endif
11644
11645#ifndef IEM_WITH_SETJMP
11646# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11647 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11648# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11649 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11650#else
11651# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11652 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11653# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11654 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11655#endif
11656
11657
11658
11659#ifndef IEM_WITH_SETJMP
11660# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11661 do { \
11662 uint8_t u8Tmp; \
11663 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11664 (a_u16Dst) = u8Tmp; \
11665 } while (0)
11666# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11667 do { \
11668 uint8_t u8Tmp; \
11669 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11670 (a_u32Dst) = u8Tmp; \
11671 } while (0)
11672# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11673 do { \
11674 uint8_t u8Tmp; \
11675 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11676 (a_u64Dst) = u8Tmp; \
11677 } while (0)
11678# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11679 do { \
11680 uint16_t u16Tmp; \
11681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11682 (a_u32Dst) = u16Tmp; \
11683 } while (0)
11684# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11685 do { \
11686 uint16_t u16Tmp; \
11687 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11688 (a_u64Dst) = u16Tmp; \
11689 } while (0)
11690# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11691 do { \
11692 uint32_t u32Tmp; \
11693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11694 (a_u64Dst) = u32Tmp; \
11695 } while (0)
11696#else /* IEM_WITH_SETJMP */
11697# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11698 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11699# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11700 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11701# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11702 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11703# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11704 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11705# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11706 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11707# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11708 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11709#endif /* IEM_WITH_SETJMP */
11710
11711#ifndef IEM_WITH_SETJMP
11712# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11713 do { \
11714 uint8_t u8Tmp; \
11715 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11716 (a_u16Dst) = (int8_t)u8Tmp; \
11717 } while (0)
11718# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11719 do { \
11720 uint8_t u8Tmp; \
11721 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11722 (a_u32Dst) = (int8_t)u8Tmp; \
11723 } while (0)
11724# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11725 do { \
11726 uint8_t u8Tmp; \
11727 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11728 (a_u64Dst) = (int8_t)u8Tmp; \
11729 } while (0)
11730# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11731 do { \
11732 uint16_t u16Tmp; \
11733 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11734 (a_u32Dst) = (int16_t)u16Tmp; \
11735 } while (0)
11736# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11737 do { \
11738 uint16_t u16Tmp; \
11739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11740 (a_u64Dst) = (int16_t)u16Tmp; \
11741 } while (0)
11742# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11743 do { \
11744 uint32_t u32Tmp; \
11745 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11746 (a_u64Dst) = (int32_t)u32Tmp; \
11747 } while (0)
11748#else /* IEM_WITH_SETJMP */
11749# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11750 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11751# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11752 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11753# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11754 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11755# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11756 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11757# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11758 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11759# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11760 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11761#endif /* IEM_WITH_SETJMP */
11762
11763#ifndef IEM_WITH_SETJMP
11764# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11765 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11766# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11767 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11768# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11769 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11770# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11771 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11772#else
11773# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11774 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11775# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11776 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11777# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11778 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11779# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11780 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11781#endif
11782
11783#ifndef IEM_WITH_SETJMP
11784# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11785 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11786# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11787 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11788# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11789 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11790# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11791 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11792#else
11793# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11794 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11795# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11796 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11797# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11798 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11799# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11800 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11801#endif
11802
11803#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11804#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11805#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11806#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11807#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11808#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11809#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11810 do { \
11811 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11812 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11813 } while (0)
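
/* The negative QNaN constants above follow from the IEEE-754 layouts: sign bit
 * set, all exponent bits set, and the quiet (top mantissa) bit set; the 80-bit
 * pattern additionally sets the explicit integer bit and puts the all-ones
 * exponent plus sign into au16[4]. A guarded-out, self-contained check of the
 * 32-bit and 64-bit patterns (not IEM code): */
#if 0
# include <stdint.h>
# include <string.h>
# include <stdio.h>
# include <math.h>

int main(void)
{
    /* R32: sign(1) | exponent 0x7f800000 (all ones) | quiet bit 0x00400000. */
    uint32_t u32 = UINT32_C(0xffc00000);
    float    r32;
    memcpy(&r32, &u32, sizeof(r32));

    /* R64: sign(1) | exponent 0x7ff0000000000000 | quiet bit 0x0008000000000000. */
    uint64_t u64 = UINT64_C(0xfff8000000000000);
    double   r64;
    memcpy(&r64, &u64, sizeof(r64));

    printf("r32 NaN=%d r64 NaN=%d\n", !!isnan(r32), !!isnan(r64)); /* both 1 */
    return 0;
}
#endif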
11814
11815#ifndef IEM_WITH_SETJMP
11816# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11817 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11818# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11819 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11820#else
11821# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11822 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11823# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11824 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11825#endif
11826
11827#ifndef IEM_WITH_SETJMP
11828# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11829 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11830# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11831 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11832#else
11833# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11834 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11835# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11836 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11837#endif
11838
11839
11840#define IEM_MC_PUSH_U16(a_u16Value) \
11841 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11842#define IEM_MC_PUSH_U32(a_u32Value) \
11843 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11844#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11845 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11846#define IEM_MC_PUSH_U64(a_u64Value) \
11847 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11848
11849#define IEM_MC_POP_U16(a_pu16Value) \
11850 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11851#define IEM_MC_POP_U32(a_pu32Value) \
11852 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11853#define IEM_MC_POP_U64(a_pu64Value) \
11854 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11855
11856/** Maps guest memory for direct or bounce buffered access.
11857 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11858 * @remarks May return.
11859 */
11860#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11861 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11862
11863/** Maps guest memory for direct or bounce buffered access.
11864 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11865 * @remarks May return.
11866 */
11867#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11868 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11869
11870/** Commits the memory and unmaps the guest memory.
11871 * @remarks May return.
11872 */
11873#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11874 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11875
11876/** Commits the memory and unmaps the guest memory, unless the FPU status word
11877 * (@a a_u16FSW) together with the FPU control word indicates a pending unmasked
11878 * exception that would cause FLD not to store.
11879 *
11880 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11881 * store, while \#P will not.
11882 *
11883 * @remarks May in theory return - for now.
11884 */
11885#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11886 do { \
11887 if ( !(a_u16FSW & X86_FSW_ES) \
11888 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11889 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11890 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11891 } while (0)
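
/* Restated: the store above is suppressed only when FSW.ES is set and at least
 * one of #U, #O or #I is pending in the FSW while being unmasked in the FCW
 * (the FSW exception flags and the FCW mask bits share bit positions 0..5).
 * A guarded-out, self-contained restatement of that predicate with locally
 * defined constants mirroring the x86 layout (not IEM code): */
#if 0
# include <stdint.h>
# include <stdbool.h>
# include <stdio.h>

# define EX_FSW_IE        UINT16_C(0x0001)  /* invalid operation pending */
# define EX_FSW_OE        UINT16_C(0x0008)  /* overflow pending */
# define EX_FSW_UE        UINT16_C(0x0010)  /* underflow pending */
# define EX_FSW_ES        UINT16_C(0x0080)  /* exception summary */
# define EX_FCW_MASK_ALL  UINT16_C(0x003f)  /* all six exception mask bits */

/* True when the FPU store should be skipped. */
static bool exampleFpuStoreSuppressed(uint16_t fsw, uint16_t fcw)
{
    uint16_t const fPending  = fsw & (EX_FSW_UE | EX_FSW_OE | EX_FSW_IE);
    uint16_t const fUnmasked = fPending & ~(fcw & EX_FCW_MASK_ALL);
    return (fsw & EX_FSW_ES) && fUnmasked != 0;
}

int main(void)
{
    printf("%d\n", exampleFpuStoreSuppressed(0x0081, 0x003e)); /* #I pending and unmasked: 1 */
    printf("%d\n", exampleFpuStoreSuppressed(0x0081, 0x003f)); /* #I pending but masked: 0 */
    return 0;
}
#endif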
11892
11893/** Calculate efficient address from R/M. */
11894#ifndef IEM_WITH_SETJMP
11895# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11896 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11897#else
11898# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11899 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11900#endif
11901
11902#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11903#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11904#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11905#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11906#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11907#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11908#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11909
11910/**
11911 * Defers the rest of the instruction emulation to a C implementation routine
11912 * and returns, only taking the standard parameters.
11913 *
11914 * @param a_pfnCImpl The pointer to the C routine.
11915 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11916 */
11917#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11918
11919/**
11920 * Defers the rest of instruction emulation to a C implementation routine and
11921 * returns, taking one argument in addition to the standard ones.
11922 *
11923 * @param a_pfnCImpl The pointer to the C routine.
11924 * @param a0 The argument.
11925 */
11926#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11927
11928/**
11929 * Defers the rest of the instruction emulation to a C implementation routine
11930 * and returns, taking two arguments in addition to the standard ones.
11931 *
11932 * @param a_pfnCImpl The pointer to the C routine.
11933 * @param a0 The first extra argument.
11934 * @param a1 The second extra argument.
11935 */
11936#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11937
11938/**
11939 * Defers the rest of the instruction emulation to a C implementation routine
11940 * and returns, taking three arguments in addition to the standard ones.
11941 *
11942 * @param a_pfnCImpl The pointer to the C routine.
11943 * @param a0 The first extra argument.
11944 * @param a1 The second extra argument.
11945 * @param a2 The third extra argument.
11946 */
11947#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11948
11949/**
11950 * Defers the rest of the instruction emulation to a C implementation routine
11951 * and returns, taking four arguments in addition to the standard ones.
11952 *
11953 * @param a_pfnCImpl The pointer to the C routine.
11954 * @param a0 The first extra argument.
11955 * @param a1 The second extra argument.
11956 * @param a2 The third extra argument.
11957 * @param a3 The fourth extra argument.
11958 */
11959#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11960
11961/**
11962 * Defers the rest of the instruction emulation to a C implementation routine
11963 * and returns, taking five arguments in addition to the standard ones.
11964 *
11965 * @param a_pfnCImpl The pointer to the C routine.
11966 * @param a0 The first extra argument.
11967 * @param a1 The second extra argument.
11968 * @param a2 The third extra argument.
11969 * @param a3 The fourth extra argument.
11970 * @param a4 The fifth extra argument.
11971 */
11972#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11973
11974/**
11975 * Defers the entire instruction emulation to a C implementation routine and
11976 * returns, only taking the standard parameters.
11977 *
11978 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
11979 *
11980 * @param a_pfnCImpl The pointer to the C routine.
11981 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11982 */
11983#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11984
11985/**
11986 * Defers the entire instruction emulation to a C implementation routine and
11987 * returns, taking one argument in addition to the standard ones.
11988 *
11989 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
11990 *
11991 * @param a_pfnCImpl The pointer to the C routine.
11992 * @param a0 The argument.
11993 */
11994#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11995
11996/**
11997 * Defers the entire instruction emulation to a C implementation routine and
11998 * returns, taking two arguments in addition to the standard ones.
11999 *
12000 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
12001 *
12002 * @param a_pfnCImpl The pointer to the C routine.
12003 * @param a0 The first extra argument.
12004 * @param a1 The second extra argument.
12005 */
12006#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12007
12008/**
12009 * Defers the entire instruction emulation to a C implementation routine and
12010 * returns, taking three arguments in addition to the standard ones.
12011 *
12012 * This shall be used without any IEM_MC_BEGIN or IEM_END macro surrounding it.
12013 *
12014 * @param a_pfnCImpl The pointer to the C routine.
12015 * @param a0 The first extra argument.
12016 * @param a1 The second extra argument.
12017 * @param a2 The third extra argument.
12018 */
12019#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12020
12021/**
12022 * Calls a FPU assembly implementation taking one visible argument.
12023 *
12024 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12025 * @param a0 The first extra argument.
12026 */
12027#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12028 do { \
12029 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12030 } while (0)
12031
12032/**
12033 * Calls a FPU assembly implementation taking two visible arguments.
12034 *
12035 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12036 * @param a0 The first extra argument.
12037 * @param a1 The second extra argument.
12038 */
12039#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12040 do { \
12041 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12042 } while (0)
12043
12044/**
12045 * Calls a FPU assembly implementation taking three visible arguments.
12046 *
12047 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12048 * @param a0 The first extra argument.
12049 * @param a1 The second extra argument.
12050 * @param a2 The third extra argument.
12051 */
12052#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12053 do { \
12054 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12055 } while (0)
12056
12057#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12058 do { \
12059 (a_FpuData).FSW = (a_FSW); \
12060 (a_FpuData).r80Result = *(a_pr80Value); \
12061 } while (0)
12062
12063/** Pushes FPU result onto the stack. */
12064#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12065 iemFpuPushResult(pVCpu, &a_FpuData)
12066/** Pushes FPU result onto the stack and sets the FPUDP. */
12067#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12068 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12069
12070/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12071#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12072 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12073
12074/** Stores FPU result in a stack register. */
12075#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12076 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12077/** Stores FPU result in a stack register and pops the stack. */
12078#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12079 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12080/** Stores FPU result in a stack register and sets the FPUDP. */
12081#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12082 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12083/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12084 * stack. */
12085#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12086 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12087
12088/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12089#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12090 iemFpuUpdateOpcodeAndIp(pVCpu)
12091/** Free a stack register (for FFREE and FFREEP). */
12092#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12093 iemFpuStackFree(pVCpu, a_iStReg)
12094/** Increment the FPU stack pointer. */
12095#define IEM_MC_FPU_STACK_INC_TOP() \
12096 iemFpuStackIncTop(pVCpu)
12097/** Decrement the FPU stack pointer. */
12098#define IEM_MC_FPU_STACK_DEC_TOP() \
12099 iemFpuStackDecTop(pVCpu)
12100
12101/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12102#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12103 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12104/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12105#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12106 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12107/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12108#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12109 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12110/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12111#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12112 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12113/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12114 * stack. */
12115#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12116 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12117/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12118#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12119 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12120
12121/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12122#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12123 iemFpuStackUnderflow(pVCpu, a_iStDst)
12124/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12125 * stack. */
12126#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12127 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12128/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12129 * FPUDS. */
12130#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12131 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12132/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12133 * FPUDS. Pops stack. */
12134#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12135 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12136/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12137 * stack twice. */
12138#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12139 iemFpuStackUnderflowThenPopPop(pVCpu)
12140/** Raises a FPU stack underflow exception for an instruction pushing a result
12141 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12142#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12143 iemFpuStackPushUnderflow(pVCpu)
12144/** Raises a FPU stack underflow exception for an instruction pushing a result
12145 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12146#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12147 iemFpuStackPushUnderflowTwo(pVCpu)
12148
12149/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12150 * FPUIP, FPUCS and FOP. */
12151#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12152 iemFpuStackPushOverflow(pVCpu)
12153/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12154 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12155#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12156 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12157/** Prepares for using the FPU state.
12158 * Ensures that we can use the host FPU in the current context (RC+R0).
12159 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12160#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12161/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12162#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12163/** Actualizes the guest FPU state so it can be accessed and modified. */
12164#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12165
12166/** Prepares for using the SSE state.
12167 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12168 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12169#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12170/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12171#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12172/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12173#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12174
12175/** Prepares for using the AVX state.
12176 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12177 * Ensures the guest AVX state in the CPUMCTX is up to date.
12178 * @note This will include the AVX512 state too when support for it is added
12179 * due to the zero-extending behaviour of VEX instructions. */
12180#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12181/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12182#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12183/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12184#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12185
12186/**
12187 * Calls an MMX assembly implementation taking two visible arguments.
12188 *
12189 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12190 * @param a0 The first extra argument.
12191 * @param a1 The second extra argument.
12192 */
12193#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12194 do { \
12195 IEM_MC_PREPARE_FPU_USAGE(); \
12196 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12197 } while (0)
12198
12199/**
12200 * Calls an MMX assembly implementation taking three visible arguments.
12201 *
12202 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12203 * @param a0 The first extra argument.
12204 * @param a1 The second extra argument.
12205 * @param a2 The third extra argument.
12206 */
12207#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12208 do { \
12209 IEM_MC_PREPARE_FPU_USAGE(); \
12210 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12211 } while (0)
12212
12213
12214/**
12215 * Calls an SSE assembly implementation taking two visible arguments.
12216 *
12217 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12218 * @param a0 The first extra argument.
12219 * @param a1 The second extra argument.
12220 */
12221#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12222 do { \
12223 IEM_MC_PREPARE_SSE_USAGE(); \
12224 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12225 } while (0)
12226
12227/**
12228 * Calls an SSE assembly implementation taking three visible arguments.
12229 *
12230 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12231 * @param a0 The first extra argument.
12232 * @param a1 The second extra argument.
12233 * @param a2 The third extra argument.
12234 */
12235#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12236 do { \
12237 IEM_MC_PREPARE_SSE_USAGE(); \
12238 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12239 } while (0)
12240
12241
12242/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12243 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12244#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12245 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12246
12247/**
12248 * Calls an AVX assembly implementation taking two visible arguments.
12249 *
12250 * There is one implicit zeroth argument, a pointer to the extended state.
12251 *
12252 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12253 * @param a1 The first extra argument.
12254 * @param a2 The second extra argument.
12255 */
12256#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12257 do { \
12258 IEM_MC_PREPARE_AVX_USAGE(); \
12259 a_pfnAImpl(pXState, (a1), (a2)); \
12260 } while (0)
12261
12262/**
12263 * Calls an AVX assembly implementation taking three visible arguments.
12264 *
12265 * There is one implicit zeroth argument, a pointer to the extended state.
12266 *
12267 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12268 * @param a1 The first extra argument.
12269 * @param a2 The second extra argument.
12270 * @param a3 The third extra argument.
12271 */
12272#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12273 do { \
12274 IEM_MC_PREPARE_AVX_USAGE(); \
12275 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12276 } while (0)
12277
12278/** @note Not for IOPL or IF testing. */
12279#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12280/** @note Not for IOPL or IF testing. */
12281#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12282/** @note Not for IOPL or IF testing. */
12283#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12284/** @note Not for IOPL or IF testing. */
12285#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12286/** @note Not for IOPL or IF testing. */
12287#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12288 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12289 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12290/** @note Not for IOPL or IF testing. */
12291#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12292 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12293 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12294/** @note Not for IOPL or IF testing. */
12295#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12296 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12297 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12298 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12299/** @note Not for IOPL or IF testing. */
12300#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12301 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12302 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12303 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
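
/* The BITS_NE / BITS_EQ tests above use '!!' to collapse each masked flag to
 * 0 or 1 so that bits living at different positions can be compared directly,
 * e.g. the x86 "SF != OF" signed-less condition. A guarded-out, self-contained
 * illustration (the flag constants mirror the EFLAGS bit positions; this is
 * not IEM code): */
#if 0
# include <stdint.h>
# include <stdio.h>

# define EX_EFL_SF  UINT32_C(0x00000080)  /* sign flag, bit 7 */
# define EX_EFL_OF  UINT32_C(0x00000800)  /* overflow flag, bit 11 */

int main(void)
{
    uint32_t fEfl = EX_EFL_SF;  /* SF=1, OF=0 */

    /* Without '!!' the raw masked values (0x80 vs 0x800) would compare unequal
       even when both flags are set; normalized to 0/1 the comparison expresses
       the condition itself. */
    int fSignedLess = !!(fEfl & EX_EFL_SF) != !!(fEfl & EX_EFL_OF);
    printf("signed less: %d\n", fSignedLess); /* 1 */
    return 0;
}
#endif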
12304#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12305#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12306#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12307/** @note Not for IOPL or IF testing. */
12308#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12309 if ( pVCpu->cpum.GstCtx.cx != 0 \
12310 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12311/** @note Not for IOPL or IF testing. */
12312#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12313 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12314 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12315/** @note Not for IOPL or IF testing. */
12316#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12317 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12318 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12319/** @note Not for IOPL or IF testing. */
12320#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12321 if ( pVCpu->cpum.GstCtx.cx != 0 \
12322 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12323/** @note Not for IOPL or IF testing. */
12324#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12325 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12326 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12327/** @note Not for IOPL or IF testing. */
12328#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12329 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12330 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12331#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12332#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12333
12334#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12335 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12336#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12337 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12338#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12339 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12340#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12341 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12342#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12343 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12344#define IEM_MC_IF_FCW_IM() \
12345 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12346
12347#define IEM_MC_ELSE() } else {
12348#define IEM_MC_ENDIF() } do {} while (0)
12349
12350/** @} */
12351
12352
12353/** @name Opcode Debug Helpers.
12354 * @{
12355 */
12356#ifdef VBOX_WITH_STATISTICS
12357# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12358#else
12359# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12360#endif
12361
12362#ifdef DEBUG
12363# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12364 do { \
12365 IEMOP_INC_STATS(a_Stats); \
12366 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12367 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12368 } while (0)
12369
12370# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12371 do { \
12372 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12373 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12374 (void)RT_CONCAT(OP_,a_Upper); \
12375 (void)(a_fDisHints); \
12376 (void)(a_fIemHints); \
12377 } while (0)
12378
12379# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12380 do { \
12381 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12382 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12383 (void)RT_CONCAT(OP_,a_Upper); \
12384 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12385 (void)(a_fDisHints); \
12386 (void)(a_fIemHints); \
12387 } while (0)
12388
12389# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12390 do { \
12391 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12392 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12393 (void)RT_CONCAT(OP_,a_Upper); \
12394 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12395 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12396 (void)(a_fDisHints); \
12397 (void)(a_fIemHints); \
12398 } while (0)
12399
12400# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12401 do { \
12402 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12403 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12404 (void)RT_CONCAT(OP_,a_Upper); \
12405 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12406 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12407 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12408 (void)(a_fDisHints); \
12409 (void)(a_fIemHints); \
12410 } while (0)
12411
12412# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12413 do { \
12414 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12415 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12416 (void)RT_CONCAT(OP_,a_Upper); \
12417 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12418 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12419 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12420 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12421 (void)(a_fDisHints); \
12422 (void)(a_fIemHints); \
12423 } while (0)
12424
12425#else
12426# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12427
12428# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12429 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12430# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12431 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12432# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12433 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12434# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12435 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12436# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12437 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12438
12439#endif
12440
12441#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12442 IEMOP_MNEMONIC0EX(a_Lower, \
12443 #a_Lower, \
12444 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12445#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12446 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12447 #a_Lower " " #a_Op1, \
12448 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12449#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12450 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12451 #a_Lower " " #a_Op1 "," #a_Op2, \
12452 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12453#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12454 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12455 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12456 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12457#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12458 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12459 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12460 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
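
/* The IEMOP_MNEMONIC* wrappers above derive both the statistics member name
 * (via RT_CONCAT*-style token pasting) and the logged mnemonic string (via the
 * '#' stringizing operator) from the same tokens. A guarded-out, self-contained
 * sketch of that preprocessor pattern (all EX_* names and the stats struct are
 * made up, not IEM/IPRT symbols): */
#if 0
# include <stdio.h>

struct EXAMPLESTATS
{
    unsigned add_Eb_Gb;
    unsigned cmp_Gv_Ev;
} g_ExampleStats;

# define EX_CONCAT5(a,b,c,d,e)  a##b##c##d##e

/* Bumps the pasted stats member and logs the stringized mnemonic/operands. */
# define EX_MNEMONIC2(a_Lower, a_Op1, a_Op2) \
    do { \
        g_ExampleStats.EX_CONCAT5(a_Lower,_,a_Op1,_,a_Op2) += 1; \
        printf("decode - %s %s,%s\n", #a_Lower, #a_Op1, #a_Op2); \
    } while (0)

int main(void)
{
    EX_MNEMONIC2(add, Eb, Gb);  /* increments g_ExampleStats.add_Eb_Gb */
    EX_MNEMONIC2(cmp, Gv, Ev);  /* increments g_ExampleStats.cmp_Gv_Ev */
    return 0;
}
#endif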
12461
12462/** @} */
12463
12464
12465/** @name Opcode Helpers.
12466 * @{
12467 */
12468
12469#ifdef IN_RING3
12470# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12471 do { \
12472 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12473 else \
12474 { \
12475 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12476 return IEMOP_RAISE_INVALID_OPCODE(); \
12477 } \
12478 } while (0)
12479#else
12480# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12481 do { \
12482 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12483 else return IEMOP_RAISE_INVALID_OPCODE(); \
12484 } while (0)
12485#endif
12486
12487/** The instruction requires a 186 or later. */
12488#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12489# define IEMOP_HLP_MIN_186() do { } while (0)
12490#else
12491# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12492#endif
12493
12494/** The instruction requires a 286 or later. */
12495#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12496# define IEMOP_HLP_MIN_286() do { } while (0)
12497#else
12498# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12499#endif
12500
12501/** The instruction requires a 386 or later. */
12502#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12503# define IEMOP_HLP_MIN_386() do { } while (0)
12504#else
12505# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12506#endif
12507
12508/** The instruction requires a 386 or later if the given expression is true. */
12509#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12510# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12511#else
12512# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12513#endif
12514
12515/** The instruction requires a 486 or later. */
12516#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12517# define IEMOP_HLP_MIN_486() do { } while (0)
12518#else
12519# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12520#endif
12521
12522/** The instruction requires a Pentium (586) or later. */
12523#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12524# define IEMOP_HLP_MIN_586() do { } while (0)
12525#else
12526# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12527#endif
12528
12529/** The instruction requires a PentiumPro (686) or later. */
12530#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12531# define IEMOP_HLP_MIN_686() do { } while (0)
12532#else
12533# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12534#endif
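
/* The IEMOP_HLP_MIN_* helpers above compile to a no-op whenever the target CPU
 * is fixed at build time to something new enough, and otherwise fall back to
 * the run-time test in IEMOP_HLP_MIN_CPU. A guarded-out, stripped-down sketch
 * of that compile-time/run-time split (all EX_* names are hypothetical): */
#if 0
# include <stdio.h>

# define EX_CPU_186         1
# define EX_CPU_286         2
# define EX_CFG_TARGET_CPU  0   /* 0 = decide at run time in this sketch */

static int g_uExampleTargetCpu = EX_CPU_186;

# define EX_RAISE_UD()  do { printf("#UD\n"); return; } while (0)

# if EX_CFG_TARGET_CPU >= EX_CPU_286
#  define EX_HLP_MIN_286()  do { } while (0)  /* requirement satisfied at build time */
# else
#  define EX_HLP_MIN_286()  do { if (g_uExampleTargetCpu < EX_CPU_286) EX_RAISE_UD(); } while (0)
# endif

static void exampleDecode286OnlyInsn(void)
{
    EX_HLP_MIN_286();  /* raises #UD on a 186 target, otherwise falls through */
    printf("executed\n");
}

int main(void)
{
    exampleDecode286OnlyInsn(); /* prints "#UD" with the settings above */
    return 0;
}
#endif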
12535
12536
12537/** The instruction raises an \#UD in real and V8086 mode. */
12538#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12539 do \
12540 { \
12541 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12542 else return IEMOP_RAISE_INVALID_OPCODE(); \
12543 } while (0)
12544
12545#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12546/** This instruction raises an \#UD in real and V8086 mode, or when in long mode
12547 * without a 64-bit code segment (applicable to all VMX instructions
12548 * except VMCALL).
12549 *
12550 * @note Update IEM_VMX_INSTR_CHECKS() if changes are made here.
12551 */
12552#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12553 do \
12554 { \
12555 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12556 && ( !IEM_IS_LONG_MODE(pVCpu) \
12557 || IEM_IS_64BIT_CODE(pVCpu))) \
12558 { /* likely */ } \
12559 else \
12560 { \
12561 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12562 { \
12563 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12564 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12565 return IEMOP_RAISE_INVALID_OPCODE(); \
12566 } \
12567 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12568 { \
12569 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12570 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12571 return IEMOP_RAISE_INVALID_OPCODE(); \
12572 } \
12573 } \
12574 } while (0)
12575
12576/** The instruction can only be executed in VMX operation (VMX root mode and
12577 * non-root mode).
12578 *
12579 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12580 */
12581# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12582 do \
12583 { \
12584 if (IEM_IS_VMX_ROOT_MODE(pVCpu)) { /* likely */ } \
12585 else \
12586 { \
12587 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12588 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12589 return IEMOP_RAISE_INVALID_OPCODE(); \
12590 } \
12591 } while (0)
12592#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12593
12594/** The instruction is not available in 64-bit mode; throw \#UD if we're in
12595 * 64-bit mode. */
12596#define IEMOP_HLP_NO_64BIT() \
12597 do \
12598 { \
12599 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12600 return IEMOP_RAISE_INVALID_OPCODE(); \
12601 } while (0)
12602
12603/** The instruction is only available in 64-bit mode; throw \#UD if we're not in
12604 * 64-bit mode. */
12605#define IEMOP_HLP_ONLY_64BIT() \
12606 do \
12607 { \
12608 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12609 return IEMOP_RAISE_INVALID_OPCODE(); \
12610 } while (0)
12611
12612/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12613#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12614 do \
12615 { \
12616 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12617 iemRecalEffOpSize64Default(pVCpu); \
12618 } while (0)
12619
12620/** The instruction has 64-bit operand size if 64-bit mode. */
12621#define IEMOP_HLP_64BIT_OP_SIZE() \
12622 do \
12623 { \
12624 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12625 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12626 } while (0)
12627
12628/** Only a REX prefix immediately preceding the first opcode byte takes
12629 * effect. This macro helps ensure this and logs bad guest code. */
12630#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12631 do \
12632 { \
12633 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12634 { \
12635 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12636 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12637 pVCpu->iem.s.uRexB = 0; \
12638 pVCpu->iem.s.uRexIndex = 0; \
12639 pVCpu->iem.s.uRexReg = 0; \
12640 iemRecalEffOpSize(pVCpu); \
12641 } \
12642 } while (0)
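
/* Concrete illustration of the rule above (standard x86-64 prefix behaviour,
 * not taken from this file): in the byte sequence 66 48 89 C0 the REX.W prefix
 * immediately precedes the opcode, so the instruction is 'mov rax, rax'; in
 * 48 66 89 C0 another prefix follows the REX byte, so the REX is dropped and
 * the 66h operand-size override yields 'mov ax, ax'. */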
12643
12644/**
12645 * Done decoding.
12646 */
12647#define IEMOP_HLP_DONE_DECODING() \
12648 do \
12649 { \
12650 /*nothing for now, maybe later... */ \
12651 } while (0)
12652
12653/**
12654 * Done decoding, raise \#UD exception if lock prefix present.
12655 */
12656#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12657 do \
12658 { \
12659 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12660 { /* likely */ } \
12661 else \
12662 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12663 } while (0)
12664
12665
12666/**
12667 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12668 * repnz or size prefixes are present, or if in real or v8086 mode.
12669 */
12670#define IEMOP_HLP_DONE_VEX_DECODING() \
12671 do \
12672 { \
12673 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12674 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12675 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12676 { /* likely */ } \
12677 else \
12678 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12679 } while (0)
12680
12681/**
12682 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12683 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12684 */
12685#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12686 do \
12687 { \
12688 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12689 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12690 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12691 && pVCpu->iem.s.uVexLength == 0)) \
12692 { /* likely */ } \
12693 else \
12694 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12695 } while (0)
12696
12697
12698/**
12699 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12700 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12701 * register 0, or if in real or v8086 mode.
12702 */
12703#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12704 do \
12705 { \
12706 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12707 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12708 && !pVCpu->iem.s.uVex3rdReg \
12709 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12710 { /* likely */ } \
12711 else \
12712 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12713 } while (0)
12714
12715/**
12716 * Done decoding VEX, no V, L=0.
12717 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12718 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12719 */
12720#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12721 do \
12722 { \
12723 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12724 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12725 && pVCpu->iem.s.uVexLength == 0 \
12726 && pVCpu->iem.s.uVex3rdReg == 0 \
12727 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12728 { /* likely */ } \
12729 else \
12730 return IEMOP_RAISE_INVALID_OPCODE(); \
12731 } while (0)
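
/* The VEX checks above consult fields (vvvv, L, pp) that the decoder extracted
 * from the 2- or 3-byte VEX prefix earlier; note that vvvv is stored inverted
 * in the encoding. A guarded-out, self-contained sketch of pulling those fields
 * out of a 2-byte (C5h) VEX prefix payload (not the IEM decoder): */
#if 0
# include <stdint.h>
# include <stdio.h>

int main(void)
{
    uint8_t const bVex = 0xf8;  /* ~R=1, ~vvvv=1111, L=0, pp=00 */

    uint8_t const uVexReg    = (~bVex >> 3) & 0xf;  /* decoded vvvv: 0 (register 0) */
    uint8_t const uVexLength = (bVex >> 2) & 0x1;   /* VEX.L: 0 = 128-bit operation */
    uint8_t const uVexPp     =  bVex       & 0x3;   /* implied prefix: 0 = none */

    /* With vvvv decoding to 0 and L=0, an L0/NO_VVVV style check would pass. */
    printf("vvvv=%u L=%u pp=%u\n", (unsigned)uVexReg, (unsigned)uVexLength, (unsigned)uVexPp);
    return 0;
}
#endif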
12732
12733#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12734 do \
12735 { \
12736 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12737 { /* likely */ } \
12738 else \
12739 { \
12740 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12741 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12742 } \
12743 } while (0)
12744#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12745 do \
12746 { \
12747 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12748 { /* likely */ } \
12749 else \
12750 { \
12751 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12752 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12753 } \
12754 } while (0)
12755
12756/**
12757 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12758 * are present.
12759 */
12760#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12761 do \
12762 { \
12763 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12764 { /* likely */ } \
12765 else \
12766 return IEMOP_RAISE_INVALID_OPCODE(); \
12767 } while (0)
12768
12769/**
12770 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12771 * prefixes are present.
12772 */
12773#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12774 do \
12775 { \
12776 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12777 { /* likely */ } \
12778 else \
12779 return IEMOP_RAISE_INVALID_OPCODE(); \
12780 } while (0)
12781
12782
12783/**
12784 * Calculates the effective address of a ModR/M memory operand.
12785 *
12786 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12787 *
12788 * @return Strict VBox status code.
12789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12790 * @param bRm The ModRM byte.
12791 * @param cbImm The size of any immediate following the
12792 * effective address opcode bytes. Important for
12793 * RIP relative addressing.
12794 * @param pGCPtrEff Where to return the effective address.
12795 */
12796IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12797{
12798 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12799# define SET_SS_DEF() \
12800 do \
12801 { \
12802 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12803 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12804 } while (0)
12805
12806 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12807 {
12808/** @todo Check the effective address size crap! */
12809 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12810 {
12811 uint16_t u16EffAddr;
12812
12813 /* Handle the disp16 form with no registers first. */
12814 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12815 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12816 else
12817 {
12818                /* Get the displacement. */
12819 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12820 {
12821 case 0: u16EffAddr = 0; break;
12822 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12823 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12824 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12825 }
12826
12827 /* Add the base and index registers to the disp. */
12828 switch (bRm & X86_MODRM_RM_MASK)
12829 {
12830 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12831 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12832 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12833 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12834 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12835 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12836 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12837 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12838 }
12839 }
12840
12841 *pGCPtrEff = u16EffAddr;
12842 }
12843 else
12844 {
12845 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12846 uint32_t u32EffAddr;
12847
12848 /* Handle the disp32 form with no registers first. */
12849 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12850 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12851 else
12852 {
12853 /* Get the register (or SIB) value. */
12854 switch ((bRm & X86_MODRM_RM_MASK))
12855 {
12856 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12857 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12858 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12859 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12860 case 4: /* SIB */
12861 {
12862 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12863
12864 /* Get the index and scale it. */
12865 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12866 {
12867 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12868 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12869 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12870 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12871 case 4: u32EffAddr = 0; /*none */ break;
12872 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12873 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12874 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12875 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12876 }
12877 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12878
12879 /* add base */
12880 switch (bSib & X86_SIB_BASE_MASK)
12881 {
12882 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12883 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12884 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12885 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12886 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12887 case 5:
12888 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12889 {
12890 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12891 SET_SS_DEF();
12892 }
12893 else
12894 {
12895 uint32_t u32Disp;
12896 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12897 u32EffAddr += u32Disp;
12898 }
12899 break;
12900 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12901 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12902 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12903 }
12904 break;
12905 }
12906 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12907 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12908 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12909 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12910 }
12911
12912 /* Get and add the displacement. */
12913 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12914 {
12915 case 0:
12916 break;
12917 case 1:
12918 {
12919 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12920 u32EffAddr += i8Disp;
12921 break;
12922 }
12923 case 2:
12924 {
12925 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12926 u32EffAddr += u32Disp;
12927 break;
12928 }
12929 default:
12930 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12931 }
12932
12933 }
12934 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12935 *pGCPtrEff = u32EffAddr;
12936 else
12937 {
12938 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12939 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12940 }
12941 }
12942 }
12943 else
12944 {
12945 uint64_t u64EffAddr;
12946
12947 /* Handle the rip+disp32 form with no registers first. */
12948 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12949 {
12950 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
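            /* RIP-relative: the disp32 just read is relative to the next
               instruction, i.e. the current RIP plus the opcode bytes decoded
               so far plus any immediate (cbImm) that still follows the
               displacement. */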
12951 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12952 }
12953 else
12954 {
12955 /* Get the register (or SIB) value. */
12956 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12957 {
12958 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12959 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12960 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12961 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12962 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
12963 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12964 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12965 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12966 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12967 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12968 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12969 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12970 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12971 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
12972 /* SIB */
12973 case 4:
12974 case 12:
12975 {
12976 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12977
12978 /* Get the index and scale it. */
12979 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12980 {
12981 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12982 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12983 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12984 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12985 case 4: u64EffAddr = 0; /*none */ break;
12986 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
12987 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12988 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12989 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12990 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12991 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12992 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12993 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
12994 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12995 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12996 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
12997 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12998 }
12999 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13000
13001 /* add base */
13002 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13003 {
13004 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13005 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13006 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13007 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13008 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13009 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13010 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13011 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13012 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13013 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13014 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13015 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13016 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13017 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13018 /* complicated encodings */
13019 case 5:
13020 case 13:
13021 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13022 {
13023 if (!pVCpu->iem.s.uRexB)
13024 {
13025 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13026 SET_SS_DEF();
13027 }
13028 else
13029 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13030 }
13031 else
13032 {
13033 uint32_t u32Disp;
13034 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13035 u64EffAddr += (int32_t)u32Disp;
13036 }
13037 break;
13038 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13039 }
13040 break;
13041 }
13042 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13043 }
13044
13045 /* Get and add the displacement. */
13046 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13047 {
13048 case 0:
13049 break;
13050 case 1:
13051 {
13052 int8_t i8Disp;
13053 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13054 u64EffAddr += i8Disp;
13055 break;
13056 }
13057 case 2:
13058 {
13059 uint32_t u32Disp;
13060 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13061 u64EffAddr += (int32_t)u32Disp;
13062 break;
13063 }
13064 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13065 }
13066
13067 }
13068
13069 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13070 *pGCPtrEff = u64EffAddr;
13071 else
13072 {
13073 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13074 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13075 }
13076 }
13077
13078 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13079 return VINF_SUCCESS;
13080}
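
/* Worked example of the 16-bit branch above (guarded out, with made-up register
 * values; this is not the IEM helper): bRm=0x46 encodes mod=01, rm=110, i.e.
 * BP + disp8 with the SS segment default, matching the 'case 6' + SET_SS_DEF()
 * path. */
#if 0
# include <stdint.h>
# include <stdio.h>

int main(void)
{
    uint8_t  const bRm    = 0x46;    /* mod=01, reg=000, rm=110 */
    int8_t   const i8Disp = 0x10;    /* disp8 byte following the ModR/M byte */
    uint16_t const bp     = 0x2000;  /* made-up guest BP value */

    uint8_t const mod = (bRm >> 6) & 3;
    uint8_t const rm  =  bRm       & 7;

    uint16_t u16EffAddr = 0;
    if (mod == 1 && rm == 6)
        u16EffAddr = (uint16_t)(bp + (int16_t)i8Disp);

    printf("effective address = %#06x (SS segment default)\n", (unsigned)u16EffAddr); /* 0x2010 */
    return 0;
}
#endif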
13081
13082
13083/**
13084 * Calculates the effective address of a ModR/M memory operand.
13085 *
13086 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13087 *
13088 * @return Strict VBox status code.
13089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13090 * @param bRm The ModRM byte.
13091 * @param cbImm The size of any immediate following the
13092 * effective address opcode bytes. Important for
13093 * RIP relative addressing.
13094 * @param pGCPtrEff Where to return the effective address.
13095 * @param offRsp RSP displacement.
13096 */
13097IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13098{
13099    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13100# define SET_SS_DEF() \
13101 do \
13102 { \
13103 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13104 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13105 } while (0)
13106
13107 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13108 {
13109/** @todo Check the effective address size crap! */
13110 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13111 {
13112 uint16_t u16EffAddr;
13113
13114 /* Handle the disp16 form with no registers first. */
13115 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13116 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13117 else
13118 {
13119                /* Get the displacement. */
13120 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13121 {
13122 case 0: u16EffAddr = 0; break;
13123 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13124 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13125 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13126 }
13127
13128 /* Add the base and index registers to the disp. */
13129 switch (bRm & X86_MODRM_RM_MASK)
13130 {
13131 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13132 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13133 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13134 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13135 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13136 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13137 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13138 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13139 }
13140 }
13141
13142 *pGCPtrEff = u16EffAddr;
13143 }
13144 else
13145 {
13146 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13147 uint32_t u32EffAddr;
13148
13149 /* Handle the disp32 form with no registers first. */
13150 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13151 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13152 else
13153 {
13154 /* Get the register (or SIB) value. */
13155 switch ((bRm & X86_MODRM_RM_MASK))
13156 {
13157 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13158 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13159 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13160 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13161 case 4: /* SIB */
13162 {
13163 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13164
13165 /* Get the index and scale it. */
13166 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13167 {
13168 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13169 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13170 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13171 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13172 case 4: u32EffAddr = 0; /*none */ break;
13173 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13174 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13175 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13176 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13177 }
13178 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13179
13180 /* add base */
13181 switch (bSib & X86_SIB_BASE_MASK)
13182 {
13183 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13184 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13185 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13186 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13187 case 4:
13188 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13189 SET_SS_DEF();
13190 break;
13191 case 5:
13192 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13193 {
13194 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13195 SET_SS_DEF();
13196 }
13197 else
13198 {
13199 uint32_t u32Disp;
13200 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13201 u32EffAddr += u32Disp;
13202 }
13203 break;
13204 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13205 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13206 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13207 }
13208 break;
13209 }
13210 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13211 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13212 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13213 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13214 }
13215
13216 /* Get and add the displacement. */
13217 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13218 {
13219 case 0:
13220 break;
13221 case 1:
13222 {
13223 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13224 u32EffAddr += i8Disp;
13225 break;
13226 }
13227 case 2:
13228 {
13229 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13230 u32EffAddr += u32Disp;
13231 break;
13232 }
13233 default:
13234 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13235 }
13236
13237 }
13238 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13239 *pGCPtrEff = u32EffAddr;
13240 else
13241 {
13242 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13243 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13244 }
13245 }
13246 }
13247 else
13248 {
13249 uint64_t u64EffAddr;
13250
13251 /* Handle the rip+disp32 form with no registers first. */
13252 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13253 {
13254 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13255 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13256 }
13257 else
13258 {
13259 /* Get the register (or SIB) value. */
13260 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13261 {
13262 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13263 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13264 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13265 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13266 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13267 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13268 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13269 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13270 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13271 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13272 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13273 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13274 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13275 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13276 /* SIB */
13277 case 4:
13278 case 12:
13279 {
13280 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13281
13282 /* Get the index and scale it. */
13283 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13284 {
13285 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13286 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13287 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13288 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13289 case 4: u64EffAddr = 0; /*none */ break;
13290 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13291 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13292 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13293 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13294 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13295 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13296 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13297 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13298 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13299 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13300 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13301 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13302 }
13303 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13304
13305 /* add base */
13306 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13307 {
13308 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13309 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13310 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13311 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13312 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13313 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13314 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13315 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13316 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13317 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13318 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13319 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13320 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13321 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13322 /* complicated encodings */
13323 case 5:
13324 case 13:
13325 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13326 {
13327 if (!pVCpu->iem.s.uRexB)
13328 {
13329 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13330 SET_SS_DEF();
13331 }
13332 else
13333 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13334 }
13335 else
13336 {
13337 uint32_t u32Disp;
13338 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13339 u64EffAddr += (int32_t)u32Disp;
13340 }
13341 break;
13342 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13343 }
13344 break;
13345 }
13346 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13347 }
13348
13349 /* Get and add the displacement. */
13350 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13351 {
13352 case 0:
13353 break;
13354 case 1:
13355 {
13356 int8_t i8Disp;
13357 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13358 u64EffAddr += i8Disp;
13359 break;
13360 }
13361 case 2:
13362 {
13363 uint32_t u32Disp;
13364 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13365 u64EffAddr += (int32_t)u32Disp;
13366 break;
13367 }
13368 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13369 }
13370
13371 }
13372
13373 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13374 *pGCPtrEff = u64EffAddr;
13375 else
13376 {
13377 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13378 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13379 }
13380 }
13381
13382 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13383 return VINF_SUCCESS;
13384}
13385
13386
13387#ifdef IEM_WITH_SETJMP
13388/**
13389 * Calculates the effective address of a ModR/M memory operand.
13390 *
13391 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13392 *
13393 * May longjmp on internal error.
13394 *
13395 * @return The effective address.
13396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13397 * @param bRm The ModRM byte.
13398 * @param cbImm The size of any immediate following the
13399 * effective address opcode bytes. Important for
13400 * RIP relative addressing.
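 *
 * Illustrative call sketch (hypothetical; shown for contrast with the
 * status-code returning variant above - errors are reported via longjmp,
 * so there is no status code to check):
 * @code
 *     RTGCPTR GCPtrEff = iemOpHlpCalcRmEffAddrJmp(pVCpu, bRm, 0); // cbImm=0, no immediate follows
 * @endcode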
13401 */
13402IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13403{
13404 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13405# define SET_SS_DEF() \
13406 do \
13407 { \
13408 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13409 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13410 } while (0)
13411
13412 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13413 {
13414/** @todo Check the effective address size crap! */
13415 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13416 {
13417 uint16_t u16EffAddr;
13418
13419 /* Handle the disp16 form with no registers first. */
13420 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13421 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13422 else
13423 {
13424 /* Get the displacement. */
13425 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13426 {
13427 case 0: u16EffAddr = 0; break;
13428 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13429 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13430 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13431 }
13432
13433 /* Add the base and index registers to the disp. */
13434 switch (bRm & X86_MODRM_RM_MASK)
13435 {
13436 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13437 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13438 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13439 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13440 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13441 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13442 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13443 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13444 }
13445 }
13446
13447 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13448 return u16EffAddr;
13449 }
13450
13451 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13452 uint32_t u32EffAddr;
13453
13454 /* Handle the disp32 form with no registers first. */
13455 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13456 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13457 else
13458 {
13459 /* Get the register (or SIB) value. */
13460 switch ((bRm & X86_MODRM_RM_MASK))
13461 {
13462 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13463 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13464 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13465 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13466 case 4: /* SIB */
13467 {
13468 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13469
13470 /* Get the index and scale it. */
13471 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13472 {
13473 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13474 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13475 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13476 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13477 case 4: u32EffAddr = 0; /*none */ break;
13478 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13479 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13480 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13481 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13482 }
13483 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13484
13485 /* add base */
13486 switch (bSib & X86_SIB_BASE_MASK)
13487 {
13488 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13489 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13490 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13491 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13492 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13493 case 5:
13494 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13495 {
13496 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13497 SET_SS_DEF();
13498 }
13499 else
13500 {
13501 uint32_t u32Disp;
13502 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13503 u32EffAddr += u32Disp;
13504 }
13505 break;
13506 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13507 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13508 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13509 }
13510 break;
13511 }
13512 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13513 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13514 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13515 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13516 }
13517
13518 /* Get and add the displacement. */
13519 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13520 {
13521 case 0:
13522 break;
13523 case 1:
13524 {
13525 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13526 u32EffAddr += i8Disp;
13527 break;
13528 }
13529 case 2:
13530 {
13531 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13532 u32EffAddr += u32Disp;
13533 break;
13534 }
13535 default:
13536 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13537 }
13538 }
13539
13540 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13541 {
13542 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13543 return u32EffAddr;
13544 }
13545 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13546 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13547 return u32EffAddr & UINT16_MAX;
13548 }
13549
13550 uint64_t u64EffAddr;
13551
13552 /* Handle the rip+disp32 form with no registers first. */
13553 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13554 {
13555 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13556 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13557 }
13558 else
13559 {
13560 /* Get the register (or SIB) value. */
13561 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13562 {
13563 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13564 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13565 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13566 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13567 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13568 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13569 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13570 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13571 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13572 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13573 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13574 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13575 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13576 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13577 /* SIB */
13578 case 4:
13579 case 12:
13580 {
13581 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13582
13583 /* Get the index and scale it. */
13584 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13585 {
13586 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13587 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13588 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13589 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13590 case 4: u64EffAddr = 0; /*none */ break;
13591 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13592 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13593 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13594 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13595 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13596 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13597 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13598 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13599 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13600 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13601 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13602 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13603 }
13604 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13605
13606 /* add base */
13607 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13608 {
13609 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13610 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13611 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13612 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13613 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13614 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13615 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13616 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13617 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13618 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13619 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13620 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13621 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13622 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13623 /* complicated encodings */
13624 case 5:
13625 case 13:
13626 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13627 {
13628 if (!pVCpu->iem.s.uRexB)
13629 {
13630 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13631 SET_SS_DEF();
13632 }
13633 else
13634 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13635 }
13636 else
13637 {
13638 uint32_t u32Disp;
13639 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13640 u64EffAddr += (int32_t)u32Disp;
13641 }
13642 break;
13643 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13644 }
13645 break;
13646 }
13647 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13648 }
13649
13650 /* Get and add the displacement. */
13651 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13652 {
13653 case 0:
13654 break;
13655 case 1:
13656 {
13657 int8_t i8Disp;
13658 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13659 u64EffAddr += i8Disp;
13660 break;
13661 }
13662 case 2:
13663 {
13664 uint32_t u32Disp;
13665 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13666 u64EffAddr += (int32_t)u32Disp;
13667 break;
13668 }
13669 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13670 }
13671
13672 }
13673
13674 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13675 {
13676 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13677 return u64EffAddr;
13678 }
13679 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13680 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13681 return u64EffAddr & UINT32_MAX;
13682}
13683#endif /* IEM_WITH_SETJMP */
13684
13685/** @} */
13686
13687
13688
13689/*
13690 * Include the instructions
13691 */
13692#include "IEMAllInstructions.cpp.h"
13693
13694
13695
13696#ifdef LOG_ENABLED
13697/**
13698 * Logs the current instruction.
13699 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13700 * @param fSameCtx Set if we have the same context information as the VMM,
13701 * clear if we may have already executed an instruction in
13702 * our debug context. When clear, we assume IEMCPU holds
13703 * valid CPU mode info.
13704 *
13705 * Note that @a fSameCtx only affects how the disassembly flags are chosen;
 * the register dump below is always taken from the current guest context.
13706 * @param pszFunction The IEM function doing the execution.
13707 */
13708IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13709{
13710# ifdef IN_RING3
13711 if (LogIs2Enabled())
13712 {
13713 char szInstr[256];
13714 uint32_t cbInstr = 0;
13715 if (fSameCtx)
13716 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13717 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13718 szInstr, sizeof(szInstr), &cbInstr);
13719 else
13720 {
13721 uint32_t fFlags = 0;
13722 switch (pVCpu->iem.s.enmCpuMode)
13723 {
13724 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13725 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13726 case IEMMODE_16BIT:
13727 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13728 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13729 else
13730 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13731 break;
13732 }
13733 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13734 szInstr, sizeof(szInstr), &cbInstr);
13735 }
13736
13737 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13738 Log2(("**** %s\n"
13739 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13740 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13741 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13742 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13743 " %s\n"
13744 , pszFunction,
13745 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13746 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13747 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13748 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13749 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13750 szInstr));
13751
13752 if (LogIs3Enabled())
13753 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13754 }
13755 else
13756# endif
13757 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13758 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13759 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13760}
13761#endif /* LOG_ENABLED */
13762
13763
13764/**
13765 * Makes status code adjustments (pass up from I/O and access handlers)
13766 * as well as maintaining statistics.
13767 *
13768 * @returns Strict VBox status code to pass up.
13769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13770 * @param rcStrict The status from executing an instruction.
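 *
 * Example (illustrative): if an instruction returned a scheduling status such
 * as VINF_EM_RAW_TO_R3 while pVCpu->iem.s.rcPassUp holds an informational
 * status outside the VINF_EM_FIRST..VINF_EM_LAST range, the pass-up status
 * takes precedence and is returned instead (bumping cRetPassUpStatus).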
13771 */
13772DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13773{
13774 if (rcStrict != VINF_SUCCESS)
13775 {
13776 if (RT_SUCCESS(rcStrict))
13777 {
13778 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13779 || rcStrict == VINF_IOM_R3_IOPORT_READ
13780 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13781 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13782 || rcStrict == VINF_IOM_R3_MMIO_READ
13783 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13784 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13785 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13786 || rcStrict == VINF_CPUM_R3_MSR_READ
13787 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13788 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13789 || rcStrict == VINF_EM_RAW_TO_R3
13790 || rcStrict == VINF_EM_TRIPLE_FAULT
13791 || rcStrict == VINF_GIM_R3_HYPERCALL
13792 /* raw-mode / virt handlers only: */
13793 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13794 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13795 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13796 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13797 || rcStrict == VINF_SELM_SYNC_GDT
13798 || rcStrict == VINF_CSAM_PENDING_ACTION
13799 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13800 /* nested hw.virt codes: */
13801 || rcStrict == VINF_SVM_VMEXIT
13802 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13803/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13804 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13805#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13806 if ( rcStrict == VINF_SVM_VMEXIT
13807 && rcPassUp == VINF_SUCCESS)
13808 rcStrict = VINF_SUCCESS;
13809 else
13810#endif
13811 if (rcPassUp == VINF_SUCCESS)
13812 pVCpu->iem.s.cRetInfStatuses++;
13813 else if ( rcPassUp < VINF_EM_FIRST
13814 || rcPassUp > VINF_EM_LAST
13815 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13816 {
13817 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13818 pVCpu->iem.s.cRetPassUpStatus++;
13819 rcStrict = rcPassUp;
13820 }
13821 else
13822 {
13823 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13824 pVCpu->iem.s.cRetInfStatuses++;
13825 }
13826 }
13827 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13828 pVCpu->iem.s.cRetAspectNotImplemented++;
13829 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13830 pVCpu->iem.s.cRetInstrNotImplemented++;
13831 else
13832 pVCpu->iem.s.cRetErrStatuses++;
13833 }
13834 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13835 {
13836 pVCpu->iem.s.cRetPassUpStatus++;
13837 rcStrict = pVCpu->iem.s.rcPassUp;
13838 }
13839
13840 return rcStrict;
13841}
13842
13843
13844/**
13845 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13846 * IEMExecOneWithPrefetchedByPC.
13847 *
13848 * Similar code is found in IEMExecLots.
13849 *
13850 * @return Strict VBox status code.
13851 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13852 * @param fExecuteInhibit If set, execute the instruction following CLI,
13853 * POP SS and MOV SS,GR.
13854 * @param pszFunction The calling function name.
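 *
 * Call sketch (mirrors how the public IEMExecOne* wrappers below use it):
 * @code
 *     VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
 *     if (rcStrict == VINF_SUCCESS)
 *         rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne"); // fExecuteInhibit=true
 *     else if (pVCpu->iem.s.cActiveMappings > 0)
 *         iemMemRollback(pVCpu);
 * @endcode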
13855 */
13856DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13857{
13858 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13859 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13860 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13861 RT_NOREF_PV(pszFunction);
13862
13863#ifdef IEM_WITH_SETJMP
13864 VBOXSTRICTRC rcStrict;
13865 jmp_buf JmpBuf;
13866 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13867 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13868 if ((rcStrict = setjmp(JmpBuf)) == 0)
13869 {
13870 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13871 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13872 }
13873 else
13874 pVCpu->iem.s.cLongJumps++;
13875 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13876#else
13877 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13878 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13879#endif
13880 if (rcStrict == VINF_SUCCESS)
13881 pVCpu->iem.s.cInstructions++;
13882 if (pVCpu->iem.s.cActiveMappings > 0)
13883 {
13884 Assert(rcStrict != VINF_SUCCESS);
13885 iemMemRollback(pVCpu);
13886 }
13887 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13888 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13889 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13890
13891//#ifdef DEBUG
13892// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13893//#endif
13894
13895 /* Execute the next instruction as well if a cli, pop ss or
13896 mov ss, Gr has just completed successfully. */
13897 if ( fExecuteInhibit
13898 && rcStrict == VINF_SUCCESS
13899 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13900 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
13901 {
13902 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13903 if (rcStrict == VINF_SUCCESS)
13904 {
13905#ifdef LOG_ENABLED
13906 iemLogCurInstr(pVCpu, false, pszFunction);
13907#endif
13908#ifdef IEM_WITH_SETJMP
13909 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13910 if ((rcStrict = setjmp(JmpBuf)) == 0)
13911 {
13912 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13913 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13914 }
13915 else
13916 pVCpu->iem.s.cLongJumps++;
13917 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13918#else
13919 IEM_OPCODE_GET_NEXT_U8(&b);
13920 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13921#endif
13922 if (rcStrict == VINF_SUCCESS)
13923 pVCpu->iem.s.cInstructions++;
13924 if (pVCpu->iem.s.cActiveMappings > 0)
13925 {
13926 Assert(rcStrict != VINF_SUCCESS);
13927 iemMemRollback(pVCpu);
13928 }
13929 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13930 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13931 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13932 }
13933 else if (pVCpu->iem.s.cActiveMappings > 0)
13934 iemMemRollback(pVCpu);
13935 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13936 }
13937
13938 /*
13939 * Return value fiddling, statistics and sanity assertions.
13940 */
13941 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13942
13943 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
13944 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
13945 return rcStrict;
13946}
13947
13948
13949#ifdef IN_RC
13950/**
13951 * Re-enters raw-mode or ensures we return to ring-3.
13952 *
13953 * @returns rcStrict, maybe modified.
13954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13955 * @param rcStrict The status code returned by the interpreter.
13956 */
13957DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13958{
13959 if ( !pVCpu->iem.s.fInPatchCode
13960 && ( rcStrict == VINF_SUCCESS
13961 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13962 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13963 {
13964 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13965 CPUMRawEnter(pVCpu);
13966 else
13967 {
13968 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13969 rcStrict = VINF_EM_RESCHEDULE;
13970 }
13971 }
13972 return rcStrict;
13973}
13974#endif
13975
13976
13977/**
13978 * Execute one instruction.
13979 *
13980 * @return Strict VBox status code.
13981 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
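 *
 * Minimal usage sketch (hypothetical caller on the EMT; scheduling statuses
 * are simply handed back to the caller):
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *     if (rcStrict != VINF_SUCCESS)
 *         LogFlow(("IEMExecOne -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 *     return rcStrict;
 * @endcode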
13982 */
13983VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13984{
13985#ifdef LOG_ENABLED
13986 iemLogCurInstr(pVCpu, true, "IEMExecOne");
13987#endif
13988
13989 /*
13990 * Do the decoding and emulation.
13991 */
13992 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13993 if (rcStrict == VINF_SUCCESS)
13994 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
13995 else if (pVCpu->iem.s.cActiveMappings > 0)
13996 iemMemRollback(pVCpu);
13997
13998#ifdef IN_RC
13999 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14000#endif
14001 if (rcStrict != VINF_SUCCESS)
14002 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14003 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14004 return rcStrict;
14005}
14006
14007
14008VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14009{
14010 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14011
14012 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14013 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14014 if (rcStrict == VINF_SUCCESS)
14015 {
14016 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14017 if (pcbWritten)
14018 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14019 }
14020 else if (pVCpu->iem.s.cActiveMappings > 0)
14021 iemMemRollback(pVCpu);
14022
14023#ifdef IN_RC
14024 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14025#endif
14026 return rcStrict;
14027}
14028
14029
14030VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14031 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14032{
14033 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14034
14035 VBOXSTRICTRC rcStrict;
14036 if ( cbOpcodeBytes
14037 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14038 {
14039 iemInitDecoder(pVCpu, false);
14040#ifdef IEM_WITH_CODE_TLB
14041 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14042 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14043 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14044 pVCpu->iem.s.offCurInstrStart = 0;
14045 pVCpu->iem.s.offInstrNextByte = 0;
14046#else
14047 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14048 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14049#endif
14050 rcStrict = VINF_SUCCESS;
14051 }
14052 else
14053 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14054 if (rcStrict == VINF_SUCCESS)
14055 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14056 else if (pVCpu->iem.s.cActiveMappings > 0)
14057 iemMemRollback(pVCpu);
14058
14059#ifdef IN_RC
14060 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14061#endif
14062 return rcStrict;
14063}
14064
14065
14066VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14067{
14068 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14069
14070 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14071 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14072 if (rcStrict == VINF_SUCCESS)
14073 {
14074 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14075 if (pcbWritten)
14076 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14077 }
14078 else if (pVCpu->iem.s.cActiveMappings > 0)
14079 iemMemRollback(pVCpu);
14080
14081#ifdef IN_RC
14082 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14083#endif
14084 return rcStrict;
14085}
14086
14087
14088VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14089 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14090{
14091 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14092
14093 VBOXSTRICTRC rcStrict;
14094 if ( cbOpcodeBytes
14095 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14096 {
14097 iemInitDecoder(pVCpu, true);
14098#ifdef IEM_WITH_CODE_TLB
14099 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14100 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14101 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14102 pVCpu->iem.s.offCurInstrStart = 0;
14103 pVCpu->iem.s.offInstrNextByte = 0;
14104#else
14105 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14106 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14107#endif
14108 rcStrict = VINF_SUCCESS;
14109 }
14110 else
14111 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14112 if (rcStrict == VINF_SUCCESS)
14113 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14114 else if (pVCpu->iem.s.cActiveMappings > 0)
14115 iemMemRollback(pVCpu);
14116
14117#ifdef IN_RC
14118 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14119#endif
14120 return rcStrict;
14121}
14122
14123
14124/**
14125 * For debugging DISGetParamSize, may come in handy.
14126 *
14127 * @returns Strict VBox status code.
14128 * @param pVCpu The cross context virtual CPU structure of the
14129 * calling EMT.
14130 * @param pCtxCore The context core structure.
14131 * @param OpcodeBytesPC The PC of the opcode bytes.
14132 * @param pvOpcodeBytes Prefetched opcode bytes.
14133 * @param cbOpcodeBytes Number of prefetched bytes.
14134 * @param pcbWritten Where to return the number of bytes written.
14135 * Optional.
14136 */
14137VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14138 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14139 uint32_t *pcbWritten)
14140{
14141 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14142
14143 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14144 VBOXSTRICTRC rcStrict;
14145 if ( cbOpcodeBytes
14146 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14147 {
14148 iemInitDecoder(pVCpu, true);
14149#ifdef IEM_WITH_CODE_TLB
14150 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14151 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14152 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14153 pVCpu->iem.s.offCurInstrStart = 0;
14154 pVCpu->iem.s.offInstrNextByte = 0;
14155#else
14156 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14157 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14158#endif
14159 rcStrict = VINF_SUCCESS;
14160 }
14161 else
14162 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14163 if (rcStrict == VINF_SUCCESS)
14164 {
14165 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14166 if (pcbWritten)
14167 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14168 }
14169 else if (pVCpu->iem.s.cActiveMappings > 0)
14170 iemMemRollback(pVCpu);
14171
14172#ifdef IN_RC
14173 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14174#endif
14175 return rcStrict;
14176}
14177
14178
14179VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14180{
14181 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14182
14183 /*
14184 * See if there is an interrupt pending in TRPM, inject it if we can.
14185 */
14186 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14187#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14188 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif;
14189 if (fIntrEnabled)
14190 {
14191 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14192 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu));
14193 else
14194 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14195 }
14196#else
14197 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14198#endif
14199 if ( fIntrEnabled
14200 && TRPMHasTrap(pVCpu)
14201 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14202 {
14203 uint8_t u8TrapNo;
14204 TRPMEVENT enmType;
14205 RTGCUINT uErrCode;
14206 RTGCPTR uCr2;
14207 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14208 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14209 TRPMResetTrap(pVCpu);
14210 }
14211
14212 /*
14213 * Initial decoder init w/ prefetch, then setup setjmp.
14214 */
14215 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14216 if (rcStrict == VINF_SUCCESS)
14217 {
14218#ifdef IEM_WITH_SETJMP
14219 jmp_buf JmpBuf;
14220 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14221 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14222 pVCpu->iem.s.cActiveMappings = 0;
14223 if ((rcStrict = setjmp(JmpBuf)) == 0)
14224#endif
14225 {
14226 /*
14227 * The run loop. We limit ourselves to 4096 instructions right now.
14228 */
14229 PVM pVM = pVCpu->CTX_SUFF(pVM);
14230 uint32_t cInstr = 4096;
14231 for (;;)
14232 {
14233 /*
14234 * Log the state.
14235 */
14236#ifdef LOG_ENABLED
14237 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14238#endif
14239
14240 /*
14241 * Do the decoding and emulation.
14242 */
14243 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14244 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14245 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14246 {
14247 Assert(pVCpu->iem.s.cActiveMappings == 0);
14248 pVCpu->iem.s.cInstructions++;
14249 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14250 {
14251 uint32_t fCpu = pVCpu->fLocalForcedActions
14252 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14253 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14254 | VMCPU_FF_TLB_FLUSH
14255#ifdef VBOX_WITH_RAW_MODE
14256 | VMCPU_FF_TRPM_SYNC_IDT
14257 | VMCPU_FF_SELM_SYNC_TSS
14258 | VMCPU_FF_SELM_SYNC_GDT
14259 | VMCPU_FF_SELM_SYNC_LDT
14260#endif
14261 | VMCPU_FF_INHIBIT_INTERRUPTS
14262 | VMCPU_FF_BLOCK_NMIS
14263 | VMCPU_FF_UNHALT ));
14264
14265 if (RT_LIKELY( ( !fCpu
14266 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14267 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14268 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14269 {
14270 if (cInstr-- > 0)
14271 {
14272 Assert(pVCpu->iem.s.cActiveMappings == 0);
14273 iemReInitDecoder(pVCpu);
14274 continue;
14275 }
14276 }
14277 }
14278 Assert(pVCpu->iem.s.cActiveMappings == 0);
14279 }
14280 else if (pVCpu->iem.s.cActiveMappings > 0)
14281 iemMemRollback(pVCpu);
14282 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14283 break;
14284 }
14285 }
14286#ifdef IEM_WITH_SETJMP
14287 else
14288 {
14289 if (pVCpu->iem.s.cActiveMappings > 0)
14290 iemMemRollback(pVCpu);
14291 pVCpu->iem.s.cLongJumps++;
14292 }
14293 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14294#endif
14295
14296 /*
14297 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14298 */
14299 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14300 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14301 }
14302 else
14303 {
14304 if (pVCpu->iem.s.cActiveMappings > 0)
14305 iemMemRollback(pVCpu);
14306
14307#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14308 /*
14309 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14310 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14311 */
14312 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14313#endif
14314 }
14315
14316 /*
14317 * Maybe re-enter raw-mode and log.
14318 */
14319#ifdef IN_RC
14320 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14321#endif
14322 if (rcStrict != VINF_SUCCESS)
14323 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14324 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14325 if (pcInstructions)
14326 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14327 return rcStrict;
14328}
14329
14330
14331/**
14332 * Interface used by EMExecuteExec, does exit statistics and limits.
14333 *
14334 * @returns Strict VBox status code.
14335 * @param pVCpu The cross context virtual CPU structure.
14336 * @param fWillExit To be defined.
14337 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14338 * @param cMaxInstructions Maximum number of instructions to execute.
14339 * @param cMaxInstructionsWithoutExits
14340 * The max number of instructions without exits.
14341 * @param pStats Where to return statistics.
14342 */
14343VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14344 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14345{
14346 NOREF(fWillExit); /** @todo define flexible exit crits */
14347
14348 /*
14349 * Initialize return stats.
14350 */
14351 pStats->cInstructions = 0;
14352 pStats->cExits = 0;
14353 pStats->cMaxExitDistance = 0;
14354 pStats->cReserved = 0;
14355
14356 /*
14357 * Initial decoder init w/ prefetch, then setup setjmp.
14358 */
14359 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14360 if (rcStrict == VINF_SUCCESS)
14361 {
14362#ifdef IEM_WITH_SETJMP
14363 jmp_buf JmpBuf;
14364 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14365 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14366 pVCpu->iem.s.cActiveMappings = 0;
14367 if ((rcStrict = setjmp(JmpBuf)) == 0)
14368#endif
14369 {
14370#ifdef IN_RING0
14371 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14372#endif
14373 uint32_t cInstructionSinceLastExit = 0;
14374
14375 /*
14376 * The run loop, bounded by the cMaxInstructions and cMaxInstructionsWithoutExits parameters.
14377 */
14378 PVM pVM = pVCpu->CTX_SUFF(pVM);
14379 for (;;)
14380 {
14381 /*
14382 * Log the state.
14383 */
14384#ifdef LOG_ENABLED
14385 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14386#endif
14387
14388 /*
14389 * Do the decoding and emulation.
14390 */
14391 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14392
14393 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14394 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14395
14396 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14397 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14398 {
14399 pStats->cExits += 1;
14400 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14401 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14402 cInstructionSinceLastExit = 0;
14403 }
14404
14405 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14406 {
14407 Assert(pVCpu->iem.s.cActiveMappings == 0);
14408 pVCpu->iem.s.cInstructions++;
14409 pStats->cInstructions++;
14410 cInstructionSinceLastExit++;
14411 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14412 {
14413 uint32_t fCpu = pVCpu->fLocalForcedActions
14414 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14415 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14416 | VMCPU_FF_TLB_FLUSH
14417#ifdef VBOX_WITH_RAW_MODE
14418 | VMCPU_FF_TRPM_SYNC_IDT
14419 | VMCPU_FF_SELM_SYNC_TSS
14420 | VMCPU_FF_SELM_SYNC_GDT
14421 | VMCPU_FF_SELM_SYNC_LDT
14422#endif
14423 | VMCPU_FF_INHIBIT_INTERRUPTS
14424 | VMCPU_FF_BLOCK_NMIS
14425 | VMCPU_FF_UNHALT ));
14426
14427 if (RT_LIKELY( ( ( !fCpu
14428 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14429 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14430 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) )
14431 || pStats->cInstructions < cMinInstructions))
14432 {
14433 if (pStats->cInstructions < cMaxInstructions)
14434 {
14435 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14436 {
14437#ifdef IN_RING0
14438 if ( !fCheckPreemptionPending
14439 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14440#endif
14441 {
14442 Assert(pVCpu->iem.s.cActiveMappings == 0);
14443 iemReInitDecoder(pVCpu);
14444 continue;
14445 }
14446#ifdef IN_RING0
14447 rcStrict = VINF_EM_RAW_INTERRUPT;
14448 break;
14449#endif
14450 }
14451 }
14452 }
14453 Assert(!(fCpu & VMCPU_FF_IEM));
14454 }
14455 Assert(pVCpu->iem.s.cActiveMappings == 0);
14456 }
14457 else if (pVCpu->iem.s.cActiveMappings > 0)
14458 iemMemRollback(pVCpu);
14459 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14460 break;
14461 }
14462 }
14463#ifdef IEM_WITH_SETJMP
14464 else
14465 {
14466 if (pVCpu->iem.s.cActiveMappings > 0)
14467 iemMemRollback(pVCpu);
14468 pVCpu->iem.s.cLongJumps++;
14469 }
14470 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14471#endif
14472
14473 /*
14474 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14475 */
14476 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14477 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14478 }
14479 else
14480 {
14481 if (pVCpu->iem.s.cActiveMappings > 0)
14482 iemMemRollback(pVCpu);
14483
14484#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14485 /*
14486 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14487 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14488 */
14489 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14490#endif
14491 }
14492
14493 /*
14494 * Maybe re-enter raw-mode and log.
14495 */
14496#ifdef IN_RC
14497 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14498#endif
14499 if (rcStrict != VINF_SUCCESS)
14500 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14501 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14502 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14503 return rcStrict;
14504}
14505
14506
14507/**
14508 * Injects a trap, fault, abort, software interrupt or external interrupt.
14509 *
14510 * The parameter list matches TRPMQueryTrapAll pretty closely.
14511 *
14512 * @returns Strict VBox status code.
14513 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14514 * @param u8TrapNo The trap number.
14515 * @param enmType What type is it (trap/fault/abort), software
14516 * interrupt or hardware interrupt.
14517 * @param uErrCode The error code if applicable.
14518 * @param uCr2 The CR2 value if applicable.
14519 * @param cbInstr The instruction length (only relevant for
14520 * software interrupts).
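 *
 * Illustrative sketch of forwarding a pending TRPM event (modelled on the
 * TRPM handling in IEMExecLots; error checking reduced to an assertion):
 * @code
 *     uint8_t   u8TrapNo;
 *     TRPMEVENT enmType;
 *     RTGCUINT  uErrCode;
 *     RTGCPTR   uCr2;
 *     int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL); // instruction length not needed
 *     AssertRC(rc2);
 *     VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0); // cbInstr=0
 *     TRPMResetTrap(pVCpu);
 * @endcode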
14521 */
14522VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14523 uint8_t cbInstr)
14524{
14525 iemInitDecoder(pVCpu, false);
14526#ifdef DBGFTRACE_ENABLED
14527 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14528 u8TrapNo, enmType, uErrCode, uCr2);
14529#endif
14530
14531 uint32_t fFlags;
14532 switch (enmType)
14533 {
14534 case TRPM_HARDWARE_INT:
14535 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14536 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14537 uErrCode = uCr2 = 0;
14538 break;
14539
14540 case TRPM_SOFTWARE_INT:
14541 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14542 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14543 uErrCode = uCr2 = 0;
14544 break;
14545
14546 case TRPM_TRAP:
14547 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14548 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14549 if (u8TrapNo == X86_XCPT_PF)
14550 fFlags |= IEM_XCPT_FLAGS_CR2;
14551 switch (u8TrapNo)
14552 {
14553 case X86_XCPT_DF:
14554 case X86_XCPT_TS:
14555 case X86_XCPT_NP:
14556 case X86_XCPT_SS:
14557 case X86_XCPT_PF:
14558 case X86_XCPT_AC:
14559 fFlags |= IEM_XCPT_FLAGS_ERR;
14560 break;
14561
14562 case X86_XCPT_NMI:
14563 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14564 break;
14565 }
14566 break;
14567
14568 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14569 }
14570
14571 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14572
14573 if (pVCpu->iem.s.cActiveMappings > 0)
14574 iemMemRollback(pVCpu);
14575
14576 return rcStrict;
14577}
14578
14579
14580/**
14581 * Injects the active TRPM event.
14582 *
14583 * @returns Strict VBox status code.
14584 * @param pVCpu The cross context virtual CPU structure.
14585 */
14586VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14587{
14588#ifndef IEM_IMPLEMENTS_TASKSWITCH
14589 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14590#else
14591 uint8_t u8TrapNo;
14592 TRPMEVENT enmType;
14593 RTGCUINT uErrCode;
14594 RTGCUINTPTR uCr2;
14595 uint8_t cbInstr;
14596 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14597 if (RT_FAILURE(rc))
14598 return rc;
14599
14600 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14601# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14602 if (rcStrict == VINF_SVM_VMEXIT)
14603 rcStrict = VINF_SUCCESS;
14604# endif
14605
14606 /** @todo Are there any other codes that imply the event was successfully
14607 * delivered to the guest? See @bugref{6607}. */
14608 if ( rcStrict == VINF_SUCCESS
14609 || rcStrict == VINF_IEM_RAISED_XCPT)
14610 TRPMResetTrap(pVCpu);
14611
14612 return rcStrict;
14613#endif
14614}
14615
14616
14617VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14618{
14619 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14620 return VERR_NOT_IMPLEMENTED;
14621}
14622
14623
14624VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14625{
14626 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14627 return VERR_NOT_IMPLEMENTED;
14628}
14629
14630
14631#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14632/**
14633 * Executes an IRET instruction with default operand size.
14634 *
14635 * This is for PATM.
14636 *
14637 * @returns VBox status code.
14638 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14639 * @param pCtxCore The register frame.
14640 */
14641VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14642{
14643 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14644
14645 iemCtxCoreToCtx(pCtx, pCtxCore);
14646 iemInitDecoder(pVCpu);
14647 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14648 if (rcStrict == VINF_SUCCESS)
14649 iemCtxToCtxCore(pCtxCore, pCtx);
14650 else
14651 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14652 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14653 return rcStrict;
14654}
14655#endif
14656
14657
14658/**
14659 * Macro used by the IEMExec* methods to check the given instruction length.
14660 *
14661 * Will return on failure!
14662 *
14663 * @param a_cbInstr The given instruction length.
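 * The single unsigned comparison relies on wrap-around: (a_cbInstr - a_cbMin)
 * only stays within (15 - a_cbMin) when a_cbMin <= a_cbInstr <= 15, so both
 * too-short and over-long instruction lengths are rejected in one check.
 *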
14664 * @param a_cbMin The minimum length.
14665 */
14666#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14667 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14668 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14669
14670
14671/**
14672 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14673 *
14674 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14675 *
14676 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14678 * @param rcStrict The status code to fiddle.
14679 */
14680DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14681{
14682 iemUninitExec(pVCpu);
14683#ifdef IN_RC
14684 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14685#else
14686 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14687#endif
14688}
14689
14690
14691/**
14692 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14693 *
14694 * This API ASSUMES that the caller has already verified that the guest code is
14695 * allowed to access the I/O port. (The I/O port is in the DX register in the
14696 * guest state.)
14697 *
14698 * @returns Strict VBox status code.
14699 * @param pVCpu The cross context virtual CPU structure.
14700 * @param cbValue The size of the I/O port access (1, 2, or 4).
14701 * @param enmAddrMode The addressing mode.
14702 * @param fRepPrefix Indicates whether a repeat prefix is used
14703 * (doesn't matter which for this instruction).
14704 * @param cbInstr The instruction length in bytes.
14705 * @param iEffSeg The effective segment register (index).
14706 * @param fIoChecked Whether the access to the I/O port has been
14707 * checked or not. It's typically checked in the
14708 * HM scenario.
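 *
 * Hypothetical call sketch (the concrete cbValue / enmAddrMode / iEffSeg
 * values would come from the decoded exit information; shown only to
 * illustrate the parameter order):
 * @code
 *     // rep outsw, 32-bit address size, DS segment, port access already checked
 *     VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, 2, IEMMODE_32BIT,
 *                                                  true, cbInstr, X86_SREG_DS, true);
 * @endcode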
14709 */
14710VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14711 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14712{
14713 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14714 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14715
14716 /*
14717 * State init.
14718 */
14719 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14720
14721 /*
14722 * Switch orgy for getting to the right handler.
14723 */
14724 VBOXSTRICTRC rcStrict;
14725 if (fRepPrefix)
14726 {
14727 switch (enmAddrMode)
14728 {
14729 case IEMMODE_16BIT:
14730 switch (cbValue)
14731 {
14732 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14733 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14734 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14735 default:
14736 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14737 }
14738 break;
14739
14740 case IEMMODE_32BIT:
14741 switch (cbValue)
14742 {
14743 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14744 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14745 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14746 default:
14747 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14748 }
14749 break;
14750
14751 case IEMMODE_64BIT:
14752 switch (cbValue)
14753 {
14754 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14755 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14756 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14757 default:
14758 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14759 }
14760 break;
14761
14762 default:
14763 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14764 }
14765 }
14766 else
14767 {
14768 switch (enmAddrMode)
14769 {
14770 case IEMMODE_16BIT:
14771 switch (cbValue)
14772 {
14773 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14774 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14775 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14776 default:
14777 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14778 }
14779 break;
14780
14781 case IEMMODE_32BIT:
14782 switch (cbValue)
14783 {
14784 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14785 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14786 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14787 default:
14788 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14789 }
14790 break;
14791
14792 case IEMMODE_64BIT:
14793 switch (cbValue)
14794 {
14795 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14796 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14797 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14798 default:
14799 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14800 }
14801 break;
14802
14803 default:
14804 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14805 }
14806 }
14807
14808 if (pVCpu->iem.s.cActiveMappings)
14809 iemMemRollback(pVCpu);
14810
14811 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14812}
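

/*
 * A minimal, illustrative caller sketch (the wrapper name and the chosen
 * values are hypothetical; only IEMExecStringIoWrite and the IEMMODE_XXX /
 * X86_SREG_XXX constants come from the real headers): emulating a REP OUTSB
 * with 32-bit addressing after the caller has already done the I/O
 * permission check.
 */
#if 0
static VBOXSTRICTRC exampleHandleRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif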
14813
14814
14815/**
14816 * Interface for HM and EM for executing string I/O IN (read) instructions.
14817 *
14818 * This API ASSUMES that the caller has already verified that the guest code is
14819 * allowed to access the I/O port. (The I/O port is in the DX register in the
14820 * guest state.)
14821 *
14822 * @returns Strict VBox status code.
14823 * @param pVCpu The cross context virtual CPU structure.
14824 * @param cbValue The size of the I/O port access (1, 2, or 4).
14825 * @param enmAddrMode The addressing mode.
14826 * @param fRepPrefix Indicates whether a repeat prefix is used
14827 * (doesn't matter which for this instruction).
14828 * @param cbInstr The instruction length in bytes.
14829 * @param fIoChecked Whether the access to the I/O port has been
14830 * checked or not. It's typically checked in the
14831 * HM scenario.
14832 */
14833VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14834 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14835{
14836 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14837
14838 /*
14839 * State init.
14840 */
14841 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14842
14843 /*
14844 * Switch orgy for getting to the right handler.
14845 */
14846 VBOXSTRICTRC rcStrict;
14847 if (fRepPrefix)
14848 {
14849 switch (enmAddrMode)
14850 {
14851 case IEMMODE_16BIT:
14852 switch (cbValue)
14853 {
14854 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14855 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14856 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14857 default:
14858 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14859 }
14860 break;
14861
14862 case IEMMODE_32BIT:
14863 switch (cbValue)
14864 {
14865 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14866 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14867 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14868 default:
14869 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14870 }
14871 break;
14872
14873 case IEMMODE_64BIT:
14874 switch (cbValue)
14875 {
14876 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14877 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14878 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14879 default:
14880 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14881 }
14882 break;
14883
14884 default:
14885 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14886 }
14887 }
14888 else
14889 {
14890 switch (enmAddrMode)
14891 {
14892 case IEMMODE_16BIT:
14893 switch (cbValue)
14894 {
14895 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14896 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14897 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14898 default:
14899 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14900 }
14901 break;
14902
14903 case IEMMODE_32BIT:
14904 switch (cbValue)
14905 {
14906 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14907 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14908 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14909 default:
14910 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14911 }
14912 break;
14913
14914 case IEMMODE_64BIT:
14915 switch (cbValue)
14916 {
14917 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14918 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14919 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14920 default:
14921 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14922 }
14923 break;
14924
14925 default:
14926 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14927 }
14928 }
14929
14930 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
14931 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14932}
14933
14934
14935/**
 14936 * Interface for rawmode to execute an OUT (port write) instruction.
14937 *
14938 * @returns Strict VBox status code.
14939 * @param pVCpu The cross context virtual CPU structure.
14940 * @param cbInstr The instruction length in bytes.
 14941 * @param u16Port The port to write to.
14942 * @param cbReg The register size.
14943 *
14944 * @remarks In ring-0 not all of the state needs to be synced in.
14945 */
14946VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14947{
14948 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14949 Assert(cbReg <= 4 && cbReg != 3);
14950
14951 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14952 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14953 Assert(!pVCpu->iem.s.cActiveMappings);
14954 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14955}
14956
14957
14958/**
 14959 * Interface for rawmode to execute an IN (port read) instruction.
14960 *
14961 * @returns Strict VBox status code.
14962 * @param pVCpu The cross context virtual CPU structure.
14963 * @param cbInstr The instruction length in bytes.
14964 * @param u16Port The port to read.
14965 * @param cbReg The register size.
14966 */
14967VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14968{
14969 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14970 Assert(cbReg <= 4 && cbReg != 3);
14971
14972 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14973 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14974 Assert(!pVCpu->iem.s.cActiveMappings);
14975 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14976}
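

/*
 * Illustrative sketch (hypothetical wrapper name and port value): emulating a
 * decoded single-byte IN, e.g. "in al, 60h", on behalf of the caller.
 */
#if 0
static VBOXSTRICTRC exampleHandleInAl60h(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecDecodedIn(pVCpu, cbInstr, 0x60 /*u16Port*/, 1 /*cbReg*/);
}
#endif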
14977
14978
14979/**
14980 * Interface for HM and EM to write to a CRx register.
14981 *
14982 * @returns Strict VBox status code.
14983 * @param pVCpu The cross context virtual CPU structure.
14984 * @param cbInstr The instruction length in bytes.
14985 * @param iCrReg The control register number (destination).
14986 * @param iGReg The general purpose register number (source).
14987 *
14988 * @remarks In ring-0 not all of the state needs to be synced in.
14989 */
14990VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14991{
14992 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14993 Assert(iCrReg < 16);
14994 Assert(iGReg < 16);
14995
14996 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14997 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14998 Assert(!pVCpu->iem.s.cActiveMappings);
14999 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15000}
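

/*
 * Illustrative sketch (hypothetical wrapper name; X86_GREG_xAX is taken from
 * the x86 headers): emulating an intercepted "mov cr3, rax" write.
 */
#if 0
static VBOXSTRICTRC exampleHandleMovToCr3(PVMCPU pVCpu, uint8_t cbInstr)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3 /*iCrReg=CR3*/, X86_GREG_xAX /*iGReg*/);
}
#endif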
15001
15002
15003/**
15004 * Interface for HM and EM to read from a CRx register.
15005 *
15006 * @returns Strict VBox status code.
15007 * @param pVCpu The cross context virtual CPU structure.
15008 * @param cbInstr The instruction length in bytes.
15009 * @param iGReg The general purpose register number (destination).
15010 * @param iCrReg The control register number (source).
15011 *
15012 * @remarks In ring-0 not all of the state needs to be synced in.
15013 */
15014VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15015{
15016 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15017 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15018 | CPUMCTX_EXTRN_APIC_TPR);
15019 Assert(iCrReg < 16);
15020 Assert(iGReg < 16);
15021
15022 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15023 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15024 Assert(!pVCpu->iem.s.cActiveMappings);
15025 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15026}
15027
15028
15029/**
15030 * Interface for HM and EM to clear the CR0[TS] bit.
15031 *
15032 * @returns Strict VBox status code.
15033 * @param pVCpu The cross context virtual CPU structure.
15034 * @param cbInstr The instruction length in bytes.
15035 *
15036 * @remarks In ring-0 not all of the state needs to be synced in.
15037 */
15038VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15039{
15040 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15041
15042 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15043 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15044 Assert(!pVCpu->iem.s.cActiveMappings);
15045 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15046}
15047
15048
15049/**
15050 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15051 *
15052 * @returns Strict VBox status code.
15053 * @param pVCpu The cross context virtual CPU structure.
15054 * @param cbInstr The instruction length in bytes.
 15055 * @param uValue The 16-bit machine status word to load into CR0.
15056 *
15057 * @remarks In ring-0 not all of the state needs to be synced in.
15058 */
15059VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15060{
15061 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15062
15063 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15064 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15065 Assert(!pVCpu->iem.s.cActiveMappings);
15066 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15067}
15068
15069
15070/**
15071 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15072 *
15073 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15074 *
15075 * @returns Strict VBox status code.
15076 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15077 * @param cbInstr The instruction length in bytes.
15078 * @remarks In ring-0 not all of the state needs to be synced in.
15079 * @thread EMT(pVCpu)
15080 */
15081VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15082{
15083 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15084
15085 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15086 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15087 Assert(!pVCpu->iem.s.cActiveMappings);
15088 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15089}
15090
15091
15092/**
15093 * Interface for HM and EM to emulate the WBINVD instruction.
15094 *
15095 * @returns Strict VBox status code.
15096 * @param pVCpu The cross context virtual CPU structure.
15097 * @param cbInstr The instruction length in bytes.
15098 *
15099 * @remarks In ring-0 not all of the state needs to be synced in.
15100 */
15101VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15102{
15103 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15104
15105 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15106 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15107 Assert(!pVCpu->iem.s.cActiveMappings);
15108 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15109}
15110
15111
15112/**
15113 * Interface for HM and EM to emulate the INVD instruction.
15114 *
15115 * @returns Strict VBox status code.
15116 * @param pVCpu The cross context virtual CPU structure.
15117 * @param cbInstr The instruction length in bytes.
15118 *
15119 * @remarks In ring-0 not all of the state needs to be synced in.
15120 */
15121VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15122{
15123 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15124
15125 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15126 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15127 Assert(!pVCpu->iem.s.cActiveMappings);
15128 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15129}
15130
15131
15132/**
15133 * Interface for HM and EM to emulate the INVLPG instruction.
15134 *
15135 * @returns Strict VBox status code.
15136 * @retval VINF_PGM_SYNC_CR3
15137 *
15138 * @param pVCpu The cross context virtual CPU structure.
15139 * @param cbInstr The instruction length in bytes.
15140 * @param GCPtrPage The effective address of the page to invalidate.
15141 *
15142 * @remarks In ring-0 not all of the state needs to be synced in.
15143 */
15144VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15145{
15146 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15147
15148 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15149 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15150 Assert(!pVCpu->iem.s.cActiveMappings);
15151 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15152}
15153
15154
15155/**
15156 * Interface for HM and EM to emulate the CPUID instruction.
15157 *
15158 * @returns Strict VBox status code.
15159 *
15160 * @param pVCpu The cross context virtual CPU structure.
15161 * @param cbInstr The instruction length in bytes.
15162 *
 15163 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
15164 */
15165VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15166{
15167 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15168 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15169
15170 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15171 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15172 Assert(!pVCpu->iem.s.cActiveMappings);
15173 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15174}
15175
15176
15177/**
15178 * Interface for HM and EM to emulate the RDPMC instruction.
15179 *
15180 * @returns Strict VBox status code.
15181 *
15182 * @param pVCpu The cross context virtual CPU structure.
15183 * @param cbInstr The instruction length in bytes.
15184 *
15185 * @remarks Not all of the state needs to be synced in.
15186 */
15187VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15188{
15189 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15190 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15191
15192 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15193 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15194 Assert(!pVCpu->iem.s.cActiveMappings);
15195 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15196}
15197
15198
15199/**
15200 * Interface for HM and EM to emulate the RDTSC instruction.
15201 *
15202 * @returns Strict VBox status code.
15203 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15204 *
15205 * @param pVCpu The cross context virtual CPU structure.
15206 * @param cbInstr The instruction length in bytes.
15207 *
15208 * @remarks Not all of the state needs to be synced in.
15209 */
15210VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15211{
15212 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15213 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15214
15215 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15216 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15217 Assert(!pVCpu->iem.s.cActiveMappings);
15218 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15219}
15220
15221
15222/**
15223 * Interface for HM and EM to emulate the RDTSCP instruction.
15224 *
15225 * @returns Strict VBox status code.
15226 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15227 *
15228 * @param pVCpu The cross context virtual CPU structure.
15229 * @param cbInstr The instruction length in bytes.
15230 *
 15231 * @remarks Not all of the state needs to be synced in. Recommended
 15232 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15233 */
15234VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15235{
15236 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15237 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15238
15239 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15240 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15241 Assert(!pVCpu->iem.s.cActiveMappings);
15242 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15243}
15244
15245
15246/**
15247 * Interface for HM and EM to emulate the RDMSR instruction.
15248 *
15249 * @returns Strict VBox status code.
15250 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15251 *
15252 * @param pVCpu The cross context virtual CPU structure.
15253 * @param cbInstr The instruction length in bytes.
15254 *
15255 * @remarks Not all of the state needs to be synced in. Requires RCX and
15256 * (currently) all MSRs.
15257 */
15258VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15259{
15260 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15261 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15262
15263 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15264 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15265 Assert(!pVCpu->iem.s.cActiveMappings);
15266 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15267}
15268
15269
15270/**
15271 * Interface for HM and EM to emulate the WRMSR instruction.
15272 *
15273 * @returns Strict VBox status code.
15274 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15275 *
15276 * @param pVCpu The cross context virtual CPU structure.
15277 * @param cbInstr The instruction length in bytes.
15278 *
15279 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15280 * and (currently) all MSRs.
15281 */
15282VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15283{
15284 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15285 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15286 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15287
15288 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15289 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15290 Assert(!pVCpu->iem.s.cActiveMappings);
15291 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15292}
15293
15294
15295/**
15296 * Interface for HM and EM to emulate the MONITOR instruction.
15297 *
15298 * @returns Strict VBox status code.
15299 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15300 *
15301 * @param pVCpu The cross context virtual CPU structure.
15302 * @param cbInstr The instruction length in bytes.
15303 *
15304 * @remarks Not all of the state needs to be synced in.
 15305 * @remarks ASSUMES the default DS segment and that no segment override
 15306 * prefixes are used.
15307 */
15308VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15309{
15310 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15311 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15312
15313 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15314 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15315 Assert(!pVCpu->iem.s.cActiveMappings);
15316 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15317}
15318
15319
15320/**
15321 * Interface for HM and EM to emulate the MWAIT instruction.
15322 *
15323 * @returns Strict VBox status code.
15324 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15325 *
15326 * @param pVCpu The cross context virtual CPU structure.
15327 * @param cbInstr The instruction length in bytes.
15328 *
15329 * @remarks Not all of the state needs to be synced in.
15330 */
15331VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15332{
15333 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15334
15335 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15336 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15337 Assert(!pVCpu->iem.s.cActiveMappings);
15338 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15339}
15340
15341
15342/**
15343 * Interface for HM and EM to emulate the HLT instruction.
15344 *
15345 * @returns Strict VBox status code.
15346 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15347 *
15348 * @param pVCpu The cross context virtual CPU structure.
15349 * @param cbInstr The instruction length in bytes.
15350 *
15351 * @remarks Not all of the state needs to be synced in.
15352 */
15353VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15354{
15355 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15356
15357 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15358 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15359 Assert(!pVCpu->iem.s.cActiveMappings);
15360 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15361}
15362
15363
15364/**
15365 * Checks if IEM is in the process of delivering an event (interrupt or
15366 * exception).
15367 *
15368 * @returns true if we're in the process of raising an interrupt or exception,
15369 * false otherwise.
15370 * @param pVCpu The cross context virtual CPU structure.
15371 * @param puVector Where to store the vector associated with the
15372 * currently delivered event, optional.
 15373 * @param pfFlags Where to store the event delivery flags (see
15374 * IEM_XCPT_FLAGS_XXX), optional.
15375 * @param puErr Where to store the error code associated with the
15376 * event, optional.
15377 * @param puCr2 Where to store the CR2 associated with the event,
15378 * optional.
15379 * @remarks The caller should check the flags to determine if the error code and
15380 * CR2 are valid for the event.
15381 */
15382VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15383{
15384 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15385 if (fRaisingXcpt)
15386 {
15387 if (puVector)
15388 *puVector = pVCpu->iem.s.uCurXcpt;
15389 if (pfFlags)
15390 *pfFlags = pVCpu->iem.s.fCurXcpt;
15391 if (puErr)
15392 *puErr = pVCpu->iem.s.uCurXcptErr;
15393 if (puCr2)
15394 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15395 }
15396 return fRaisingXcpt;
15397}
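

/*
 * Illustrative sketch (hypothetical helper name): querying and logging the
 * event currently being delivered. Per the API docs, the flags determine
 * whether the error code and CR2 are actually meaningful for the event.
 */
#if 0
static void exampleLogCurrentEvent(PVMCPU pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x (fFlags=%#x uErr=%#x uCr2=%#RX64)\n", uVector, fFlags, uErr, uCr2));
}
#endif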
15398
15399#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15400
15401/**
15402 * Interface for HM and EM to emulate the CLGI instruction.
15403 *
15404 * @returns Strict VBox status code.
15405 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15406 * @param cbInstr The instruction length in bytes.
15407 * @thread EMT(pVCpu)
15408 */
15409VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15410{
15411 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15412
15413 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15414 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15415 Assert(!pVCpu->iem.s.cActiveMappings);
15416 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15417}
15418
15419
15420/**
15421 * Interface for HM and EM to emulate the STGI instruction.
15422 *
15423 * @returns Strict VBox status code.
15424 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15425 * @param cbInstr The instruction length in bytes.
15426 * @thread EMT(pVCpu)
15427 */
15428VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15429{
15430 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15431
15432 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15433 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15434 Assert(!pVCpu->iem.s.cActiveMappings);
15435 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15436}
15437
15438
15439/**
15440 * Interface for HM and EM to emulate the VMLOAD instruction.
15441 *
15442 * @returns Strict VBox status code.
15443 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15444 * @param cbInstr The instruction length in bytes.
15445 * @thread EMT(pVCpu)
15446 */
15447VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15448{
15449 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15450
15451 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15452 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15453 Assert(!pVCpu->iem.s.cActiveMappings);
15454 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15455}
15456
15457
15458/**
15459 * Interface for HM and EM to emulate the VMSAVE instruction.
15460 *
15461 * @returns Strict VBox status code.
15462 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15463 * @param cbInstr The instruction length in bytes.
15464 * @thread EMT(pVCpu)
15465 */
15466VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15467{
15468 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15469
15470 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15471 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15472 Assert(!pVCpu->iem.s.cActiveMappings);
15473 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15474}
15475
15476
15477/**
15478 * Interface for HM and EM to emulate the INVLPGA instruction.
15479 *
15480 * @returns Strict VBox status code.
15481 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15482 * @param cbInstr The instruction length in bytes.
15483 * @thread EMT(pVCpu)
15484 */
15485VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15486{
15487 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15488
15489 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15490 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15491 Assert(!pVCpu->iem.s.cActiveMappings);
15492 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15493}
15494
15495
15496/**
15497 * Interface for HM and EM to emulate the VMRUN instruction.
15498 *
15499 * @returns Strict VBox status code.
15500 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15501 * @param cbInstr The instruction length in bytes.
15502 * @thread EMT(pVCpu)
15503 */
15504VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15505{
15506 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15507 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15508
15509 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15510 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15511 Assert(!pVCpu->iem.s.cActiveMappings);
15512 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15513}
15514
15515
15516/**
15517 * Interface for HM and EM to emulate \#VMEXIT.
15518 *
15519 * @returns Strict VBox status code.
15520 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15521 * @param uExitCode The exit code.
15522 * @param uExitInfo1 The exit info. 1 field.
15523 * @param uExitInfo2 The exit info. 2 field.
15524 * @thread EMT(pVCpu)
15525 */
15526VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15527{
15528 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15529 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15530 if (pVCpu->iem.s.cActiveMappings)
15531 iemMemRollback(pVCpu);
15532 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15533}
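

/*
 * Illustrative sketch (hypothetical helper name; SVM_EXIT_INTR is assumed to
 * come from the SVM headers): forcing a nested-guest VMEXIT for a physical
 * interrupt intercept, leaving the unused exit info fields zero.
 */
#if 0
static VBOXSTRICTRC exampleVmexitOnPhysIntr(PVMCPU pVCpu)
{
    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0 /*uExitInfo1*/, 0 /*uExitInfo2*/);
}
#endif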
15534
15535#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15536
15537#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15538
15539/**
15540 * Interface for HM and EM to emulate the VMREAD instruction.
15541 *
15542 * @returns Strict VBox status code.
15543 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15544 * @param pExitInfo Pointer to the VM-exit information struct.
15545 * @thread EMT(pVCpu)
15546 */
15547VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15548{
 15549 Assert(pExitInfo);
 15550 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
 15551 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15552
15553 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15554
15555 VBOXSTRICTRC rcStrict;
15556 uint8_t const cbInstr = pExitInfo->cbInstr;
15557 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15558 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15559 {
15560 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15561 {
15562 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15563 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15564 }
15565 else
15566 {
15567 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15568 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15569 }
15570 }
15571 else
15572 {
15573 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15574 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15575 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15576 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15577 }
15578 if (pVCpu->iem.s.cActiveMappings)
15579 iemMemRollback(pVCpu);
15580 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15581}
15582
15583
15584/**
15585 * Interface for HM and EM to emulate the VMWRITE instruction.
15586 *
15587 * @returns Strict VBox status code.
15588 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15589 * @param pExitInfo Pointer to the VM-exit information struct.
15590 * @thread EMT(pVCpu)
15591 */
15592VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15593{
 15594 Assert(pExitInfo);
 15595 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
 15596 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15597
15598 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15599
15600 uint64_t u64Val;
15601 uint8_t iEffSeg;
15602 IEMMODE enmEffAddrMode;
15603 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15604 {
15605 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15606 iEffSeg = UINT8_MAX;
15607 enmEffAddrMode = UINT8_MAX;
15608 }
15609 else
15610 {
15611 u64Val = pExitInfo->GCPtrEffAddr;
15612 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15613 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15614 }
15615 uint8_t const cbInstr = pExitInfo->cbInstr;
15616 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15617 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
15618 if (pVCpu->iem.s.cActiveMappings)
15619 iemMemRollback(pVCpu);
15620 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15621}
15622
15623
15624/**
15625 * Interface for HM and EM to emulate the VMPTRLD instruction.
15626 *
15627 * @returns Strict VBox status code.
15628 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15629 * @param pExitInfo Pointer to the VM-exit information struct.
15630 * @thread EMT(pVCpu)
15631 */
15632VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15633{
15634 Assert(pExitInfo);
15635 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15636 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15637
15638 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15639
15640 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15641 uint8_t const cbInstr = pExitInfo->cbInstr;
15642 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15643 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15644 if (pVCpu->iem.s.cActiveMappings)
15645 iemMemRollback(pVCpu);
15646 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15647}
15648
15649
15650/**
15651 * Interface for HM and EM to emulate the VMPTRST instruction.
15652 *
15653 * @returns Strict VBox status code.
15654 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15655 * @param pExitInfo Pointer to the VM-exit information struct.
15656 * @thread EMT(pVCpu)
15657 */
15658VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15659{
15660 Assert(pExitInfo);
15661 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15662 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15663
15664 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15665
15666 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15667 uint8_t const cbInstr = pExitInfo->cbInstr;
15668 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15669 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15670 if (pVCpu->iem.s.cActiveMappings)
15671 iemMemRollback(pVCpu);
15672 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15673}
15674
15675
15676/**
15677 * Interface for HM and EM to emulate the VMCLEAR instruction.
15678 *
15679 * @returns Strict VBox status code.
15680 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15681 * @param pExitInfo Pointer to the VM-exit information struct.
15682 * @thread EMT(pVCpu)
15683 */
15684VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15685{
15686 Assert(pExitInfo);
15687 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15688 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15689
15690 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15691
15692 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15693 uint8_t const cbInstr = pExitInfo->cbInstr;
15694 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15695 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15696 if (pVCpu->iem.s.cActiveMappings)
15697 iemMemRollback(pVCpu);
15698 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15699}
15700
15701
15702/**
15703 * Interface for HM and EM to emulate the VMXON instruction.
15704 *
15705 * @returns Strict VBox status code.
15706 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15707 * @param pExitInfo Pointer to the VM-exit information struct.
15708 * @thread EMT(pVCpu)
15709 */
15710VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15711{
15712 Assert(pExitInfo);
15713 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15714 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15715
15716 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15717
15718 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15719 uint8_t const cbInstr = pExitInfo->cbInstr;
15720 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
15721 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
15722 if (pVCpu->iem.s.cActiveMappings)
15723 iemMemRollback(pVCpu);
15724 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15725}
15726
15727
15728/**
15729 * Interface for HM and EM to emulate the VMXOFF instruction.
15730 *
15731 * @returns Strict VBox status code.
15732 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15733 * @param cbInstr The instruction length in bytes.
15734 * @thread EMT(pVCpu)
15735 */
15736VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
15737{
15738 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15739
15740 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15741 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
15742 Assert(!pVCpu->iem.s.cActiveMappings);
15743 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15744}
15745
15746#endif
15747
15748#ifdef IN_RING3
15749
15750/**
15751 * Handles the unlikely and probably fatal merge cases.
15752 *
15753 * @returns Merged status code.
15754 * @param rcStrict Current EM status code.
15755 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15756 * with @a rcStrict.
15757 * @param iMemMap The memory mapping index. For error reporting only.
15758 * @param pVCpu The cross context virtual CPU structure of the calling
15759 * thread, for error reporting only.
15760 */
15761DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15762 unsigned iMemMap, PVMCPU pVCpu)
15763{
15764 if (RT_FAILURE_NP(rcStrict))
15765 return rcStrict;
15766
15767 if (RT_FAILURE_NP(rcStrictCommit))
15768 return rcStrictCommit;
15769
15770 if (rcStrict == rcStrictCommit)
15771 return rcStrictCommit;
15772
15773 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15774 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15775 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15776 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15777 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15778 return VERR_IOM_FF_STATUS_IPE;
15779}
15780
15781
15782/**
15783 * Helper for IOMR3ProcessForceFlag.
15784 *
15785 * @returns Merged status code.
15786 * @param rcStrict Current EM status code.
15787 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15788 * with @a rcStrict.
15789 * @param iMemMap The memory mapping index. For error reporting only.
15790 * @param pVCpu The cross context virtual CPU structure of the calling
15791 * thread, for error reporting only.
15792 */
15793DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15794{
15795 /* Simple. */
15796 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15797 return rcStrictCommit;
15798
15799 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15800 return rcStrict;
15801
15802 /* EM scheduling status codes. */
15803 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15804 && rcStrict <= VINF_EM_LAST))
15805 {
15806 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15807 && rcStrictCommit <= VINF_EM_LAST))
15808 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15809 }
15810
15811 /* Unlikely */
15812 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15813}
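

/*
 * Note on the merging policy above: when both codes are EM scheduling codes,
 * the numerically lower one is kept (lower VINF_EM_* values carry higher
 * scheduling priority), while failures and other unexpected combinations are
 * deferred to iemR3MergeStatusSlow.
 */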
15814
15815
15816/**
15817 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15818 *
15819 * @returns Merge between @a rcStrict and what the commit operation returned.
15820 * @param pVM The cross context VM structure.
15821 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15822 * @param rcStrict The status code returned by ring-0 or raw-mode.
15823 */
15824VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15825{
15826 /*
15827 * Reset the pending commit.
15828 */
15829 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15830 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15831 ("%#x %#x %#x\n",
15832 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15833 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15834
15835 /*
15836 * Commit the pending bounce buffers (usually just one).
15837 */
15838 unsigned cBufs = 0;
15839 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15840 while (iMemMap-- > 0)
15841 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15842 {
15843 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15844 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15845 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15846
15847 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15848 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15849 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15850
15851 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15852 {
15853 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15854 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15855 pbBuf,
15856 cbFirst,
15857 PGMACCESSORIGIN_IEM);
15858 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15859 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15860 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15861 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15862 }
15863
15864 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15865 {
15866 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15867 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15868 pbBuf + cbFirst,
15869 cbSecond,
15870 PGMACCESSORIGIN_IEM);
15871 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15872 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15873 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15874 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15875 }
15876 cBufs++;
15877 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15878 }
15879
15880 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15881 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15882 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15883 pVCpu->iem.s.cActiveMappings = 0;
15884 return rcStrict;
15885}
15886
15887#endif /* IN_RING3 */
15888