VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@74332

Last change on this file was 74332, checked in by vboxsync on 2018-09-18:

VMM/IEM: Nested SVM: bugref:7243 SVM macro cleanup.

1/* $Id: IEMAll.cpp 74332 2018-09-18 06:56:39Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
 21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
 35 * Using the existing disassembler has been considered; however, this is thought
 36 * to conflict with speed, as the disassembler chews things a bit too much while
 37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
 47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
 48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
 49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
 50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
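/*
 * Illustrative sketch (not from the original source): the level/purpose list
 * above maps onto the regular VBox logging macros within LOG_GROUP_IEM, e.g.:
 *
 *     Log(("iemRaiseXcptOrInt: vec=%#x\n", u8Vector));                  // level 1: exceptions
 *     LogFlow(("IEMExecOne: rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));   // flow: enter/exit info
 *     Log4(("decode - %04x:%08RX64 %s\n", uCsSel, uRip, pszMnemonic));  // level 4: mnemonics w/ EIP
 *
 * The format strings and locals (u8Vector, uCsSel, uRip, pszMnemonic) are
 * hypothetical; only the level-to-purpose mapping follows the list above.
 */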
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
 135 * We're using macros for this so that adding and removing parameters as well as
 136 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
 148 * We're using macros for this so that adding and removing parameters as well as
 149 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
196
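/*
 * Illustrative sketch (assumption, not part of the original file): a decoder
 * function is defined with FNIEMOP_DEF so the calling convention and compiler
 * attributes live in one place, and is invoked through FNIEMOP_CALL:
 *
 *     FNIEMOP_DEF(iemOp_Example)                   // hypothetical handler name
 *     {
 *         IEM_RETURN_ASPECT_NOT_IMPLEMENTED();     // placeholder body
 *     }
 *
 *     // ... later, in the instruction decoder loop:
 *     return FNIEMOP_CALL(g_apfnOneByteMap[bOpcode]);
 */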
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
210
211/**
212 * CPU exception classes.
213 */
214typedef enum IEMXCPTCLASS
215{
216 IEMXCPTCLASS_BENIGN,
217 IEMXCPTCLASS_CONTRIBUTORY,
218 IEMXCPTCLASS_PAGE_FAULT,
219 IEMXCPTCLASS_DOUBLE_FAULT
220} IEMXCPTCLASS;
221
222
223/*********************************************************************************************************************************
224* Defined Constants And Macros *
225*********************************************************************************************************************************/
226/** @def IEM_WITH_SETJMP
227 * Enables alternative status code handling using setjmps.
228 *
229 * This adds a bit of expense via the setjmp() call since it saves all the
230 * non-volatile registers. However, it eliminates return code checks and allows
231 * for more optimal return value passing (return regs instead of stack buffer).
232 */
233#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
234# define IEM_WITH_SETJMP
235#endif
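/*
 * Illustrative note (assumption): with IEM_WITH_SETJMP defined, the opcode
 * fetch and exception raising helpers gain *Jmp variants that longjmp out on
 * failure instead of returning a status code, roughly:
 *
 *     uint8_t b = iemOpcodeGetNextU8Jmp(pVCpu);    // bails out via longjmp on error
 *
 * versus the status-code style:
 *
 *     uint8_t b;
 *     VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8(pVCpu, &b);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */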
236
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
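/*
 * Illustrative sketch (assumption): typical use is as the default case of an
 * exhaustive switch, e.g. over the effective address mode, so GCC does not
 * warn about paths that cannot be taken:
 *
 *     switch (pVCpu->iem.s.enmEffAddrMode)
 *     {
 *         case IEMMODE_16BIT: cbStep = 2; break;
 *         case IEMMODE_32BIT: cbStep = 4; break;
 *         case IEMMODE_64BIT: cbStep = 8; break;
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 *
 * (cbStep is a hypothetical local used only for this example.)
 */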
243
244/**
 245 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
 246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
 260 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
 261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
276
277/**
278 * Call an opcode decoder function.
279 *
 280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
 288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
 294 * Call a common opcode decoder function taking two extra arguments.
 295 *
 296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_1.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
300
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
 305 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in a 64-bit code segment.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Check if we're currently executing in real mode.
335 *
336 * @returns @c true if it is, @c false if not.
337 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
338 */
339#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
340
341/**
342 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
343 * @returns PCCPUMFEATURES
344 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
345 */
346#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
347
348/**
349 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
350 * @returns PCCPUMFEATURES
351 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
352 */
353#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
354
355/**
356 * Evaluates to true if we're presenting an Intel CPU to the guest.
357 */
358#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
359
360/**
361 * Evaluates to true if we're presenting an AMD CPU to the guest.
362 */
363#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
364
365/**
366 * Check if the address is canonical.
367 */
368#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
369
370/**
371 * Gets the effective VEX.VVVV value.
372 *
 373 * The 4th bit is ignored when not executing 64-bit code.
374 * @returns effective V-register value.
375 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
376 */
377#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
378 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
379
380/** @def IEM_USE_UNALIGNED_DATA_ACCESS
381 * Use unaligned accesses instead of elaborate byte assembly. */
382#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
383# define IEM_USE_UNALIGNED_DATA_ACCESS
384#endif
385
386#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
387/**
388 * Check if VMX is enabled.
389 */
390# define IEM_IS_VMX_ENABLED(a_pVCpu) (CPUMIsGuestVmxEnabled(IEM_GET_CTX(a_pVCpu)))
391
392/**
393 * Check if the guest has entered VMX root operation.
394 */
395# define IEM_IS_VMX_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
396
397/**
398 * Check if the guest has entered VMX non-root operation.
399 */
400# define IEM_IS_VMX_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
401
402#else
403# define IEM_VMX_INSTR_COMMON_CHECKS(a_pVCpu, a_szInstr, a_InsDiagPrefix) do { } while (0)
404# define IEM_IS_VMX_ENABLED(a_pVCpu) (false)
405# define IEM_IS_VMX_ROOT_MODE(a_pVCpu) (false)
406# define IEM_IS_VMX_NON_ROOT_MODE(a_pVCpu) (false)
407
408#endif
409
410#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
411/**
412 * Check if an SVM control/instruction intercept is set.
413 */
414# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
415
416/**
417 * Check if an SVM read CRx intercept is set.
418 */
419# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
420
421/**
422 * Check if an SVM write CRx intercept is set.
423 */
424# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
425
426/**
427 * Check if an SVM read DRx intercept is set.
428 */
429# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
430
431/**
432 * Check if an SVM write DRx intercept is set.
433 */
434# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
435
436/**
437 * Check if an SVM exception intercept is set.
438 */
439# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
440
441/**
442 * Invokes the SVM \#VMEXIT handler for the nested-guest.
443 */
444# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
445 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
446
447/**
448 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
449 * corresponding decode assist information.
450 */
451# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
452 do \
453 { \
454 uint64_t uExitInfo1; \
455 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
456 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
457 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
458 else \
459 uExitInfo1 = 0; \
460 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
461 } while (0)
462
463/** Checks and handles the SVM nested-guest instruction intercept and updates
 464 * the NRIP if needed.
465 */
466# define IEM_CHECK_SVM_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
467 do \
468 { \
469 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
470 { \
471 IEM_UPDATE_SVM_NRIP(a_pVCpu); \
472 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
473 } \
474 } while (0)
475
476/** Checks and handles SVM nested-guest CR0 read intercept. */
477# define IEM_CHECK_SVM_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
478 do \
479 { \
480 if (!IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
481 { /* probably likely */ } \
482 else \
483 { \
484 IEM_UPDATE_SVM_NRIP(a_pVCpu); \
485 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
486 } \
487 } while (0)
488
489/**
 490 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
491 */
492# define IEM_UPDATE_SVM_NRIP(a_pVCpu) \
493 do { \
494 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
495 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
496 } while (0)
497
498#else
499# define IEM_IS_SVM_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
500# define IEM_IS_SVM_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
501# define IEM_IS_SVM_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
502# define IEM_IS_SVM_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
503# define IEM_IS_SVM_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
504# define IEM_IS_SVM_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
505# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
506# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
507# define IEM_CHECK_SVM_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
508# define IEM_CHECK_SVM_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
509# define IEM_UPDATE_SVM_NRIP(a_pVCpu) do { } while (0)
510
511#endif
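/*
 * Illustrative sketch (assumption, not taken from this file): an instruction
 * implementation typically checks its SVM intercept up front, for instance a
 * WBINVD-style handler might do:
 *
 *     IEM_CHECK_SVM_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD,
 *                                   SVM_EXIT_WBINVD, 0, 0);
 *
 * If the intercept control bit is set, this updates the NRIP (when the CPU
 * advertises NextRIP save) and returns via the nested-guest #VMEXIT;
 * otherwise execution falls through. The intercept and exit-code constants
 * shown here are assumptions for illustration only.
 */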
512
513
514/*********************************************************************************************************************************
515* Global Variables *
516*********************************************************************************************************************************/
517extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
518
519
520/** Function table for the ADD instruction. */
521IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
522{
523 iemAImpl_add_u8, iemAImpl_add_u8_locked,
524 iemAImpl_add_u16, iemAImpl_add_u16_locked,
525 iemAImpl_add_u32, iemAImpl_add_u32_locked,
526 iemAImpl_add_u64, iemAImpl_add_u64_locked
527};
528
529/** Function table for the ADC instruction. */
530IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
531{
532 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
533 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
534 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
535 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
536};
537
538/** Function table for the SUB instruction. */
539IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
540{
541 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
542 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
543 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
544 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
545};
546
547/** Function table for the SBB instruction. */
548IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
549{
550 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
551 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
552 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
553 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
554};
555
556/** Function table for the OR instruction. */
557IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
558{
559 iemAImpl_or_u8, iemAImpl_or_u8_locked,
560 iemAImpl_or_u16, iemAImpl_or_u16_locked,
561 iemAImpl_or_u32, iemAImpl_or_u32_locked,
562 iemAImpl_or_u64, iemAImpl_or_u64_locked
563};
564
565/** Function table for the XOR instruction. */
566IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
567{
568 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
569 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
570 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
571 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
572};
573
574/** Function table for the AND instruction. */
575IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
576{
577 iemAImpl_and_u8, iemAImpl_and_u8_locked,
578 iemAImpl_and_u16, iemAImpl_and_u16_locked,
579 iemAImpl_and_u32, iemAImpl_and_u32_locked,
580 iemAImpl_and_u64, iemAImpl_and_u64_locked
581};
582
583/** Function table for the CMP instruction.
584 * @remarks Making operand order ASSUMPTIONS.
585 */
586IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
587{
588 iemAImpl_cmp_u8, NULL,
589 iemAImpl_cmp_u16, NULL,
590 iemAImpl_cmp_u32, NULL,
591 iemAImpl_cmp_u64, NULL
592};
593
594/** Function table for the TEST instruction.
595 * @remarks Making operand order ASSUMPTIONS.
596 */
597IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
598{
599 iemAImpl_test_u8, NULL,
600 iemAImpl_test_u16, NULL,
601 iemAImpl_test_u32, NULL,
602 iemAImpl_test_u64, NULL
603};
604
605/** Function table for the BT instruction. */
606IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
607{
608 NULL, NULL,
609 iemAImpl_bt_u16, NULL,
610 iemAImpl_bt_u32, NULL,
611 iemAImpl_bt_u64, NULL
612};
613
614/** Function table for the BTC instruction. */
615IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
616{
617 NULL, NULL,
618 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
619 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
620 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
621};
622
623/** Function table for the BTR instruction. */
624IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
625{
626 NULL, NULL,
627 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
628 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
629 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
630};
631
632/** Function table for the BTS instruction. */
633IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
634{
635 NULL, NULL,
636 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
637 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
638 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
639};
640
641/** Function table for the BSF instruction. */
642IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
643{
644 NULL, NULL,
645 iemAImpl_bsf_u16, NULL,
646 iemAImpl_bsf_u32, NULL,
647 iemAImpl_bsf_u64, NULL
648};
649
650/** Function table for the BSR instruction. */
651IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
652{
653 NULL, NULL,
654 iemAImpl_bsr_u16, NULL,
655 iemAImpl_bsr_u32, NULL,
656 iemAImpl_bsr_u64, NULL
657};
658
659/** Function table for the IMUL instruction. */
660IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
661{
662 NULL, NULL,
663 iemAImpl_imul_two_u16, NULL,
664 iemAImpl_imul_two_u32, NULL,
665 iemAImpl_imul_two_u64, NULL
666};
667
668/** Group 1 /r lookup table. */
669IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
670{
671 &g_iemAImpl_add,
672 &g_iemAImpl_or,
673 &g_iemAImpl_adc,
674 &g_iemAImpl_sbb,
675 &g_iemAImpl_and,
676 &g_iemAImpl_sub,
677 &g_iemAImpl_xor,
678 &g_iemAImpl_cmp
679};
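/*
 * Illustrative sketch (assumption): the group 1 opcodes (0x80..0x83) pick the
 * implementation via the ModR/M reg field, so the table order above mirrors
 * the /0../7 encoding (ADD, OR, ADC, SBB, AND, SUB, XOR, CMP):
 *
 *     PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 *
 * (The real decoder may use named ModR/M helper macros instead of the raw
 * shift-and-mask shown here.)
 */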
680
681/** Function table for the INC instruction. */
682IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
683{
684 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
685 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
686 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
687 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
688};
689
690/** Function table for the DEC instruction. */
691IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
692{
693 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
694 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
695 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
696 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
697};
698
699/** Function table for the NEG instruction. */
700IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
701{
702 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
703 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
704 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
705 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
706};
707
708/** Function table for the NOT instruction. */
709IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
710{
711 iemAImpl_not_u8, iemAImpl_not_u8_locked,
712 iemAImpl_not_u16, iemAImpl_not_u16_locked,
713 iemAImpl_not_u32, iemAImpl_not_u32_locked,
714 iemAImpl_not_u64, iemAImpl_not_u64_locked
715};
716
717
718/** Function table for the ROL instruction. */
719IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
720{
721 iemAImpl_rol_u8,
722 iemAImpl_rol_u16,
723 iemAImpl_rol_u32,
724 iemAImpl_rol_u64
725};
726
727/** Function table for the ROR instruction. */
728IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
729{
730 iemAImpl_ror_u8,
731 iemAImpl_ror_u16,
732 iemAImpl_ror_u32,
733 iemAImpl_ror_u64
734};
735
736/** Function table for the RCL instruction. */
737IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
738{
739 iemAImpl_rcl_u8,
740 iemAImpl_rcl_u16,
741 iemAImpl_rcl_u32,
742 iemAImpl_rcl_u64
743};
744
745/** Function table for the RCR instruction. */
746IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
747{
748 iemAImpl_rcr_u8,
749 iemAImpl_rcr_u16,
750 iemAImpl_rcr_u32,
751 iemAImpl_rcr_u64
752};
753
754/** Function table for the SHL instruction. */
755IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
756{
757 iemAImpl_shl_u8,
758 iemAImpl_shl_u16,
759 iemAImpl_shl_u32,
760 iemAImpl_shl_u64
761};
762
763/** Function table for the SHR instruction. */
764IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
765{
766 iemAImpl_shr_u8,
767 iemAImpl_shr_u16,
768 iemAImpl_shr_u32,
769 iemAImpl_shr_u64
770};
771
772/** Function table for the SAR instruction. */
773IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
774{
775 iemAImpl_sar_u8,
776 iemAImpl_sar_u16,
777 iemAImpl_sar_u32,
778 iemAImpl_sar_u64
779};
780
781
782/** Function table for the MUL instruction. */
783IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
784{
785 iemAImpl_mul_u8,
786 iemAImpl_mul_u16,
787 iemAImpl_mul_u32,
788 iemAImpl_mul_u64
789};
790
791/** Function table for the IMUL instruction working implicitly on rAX. */
792IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
793{
794 iemAImpl_imul_u8,
795 iemAImpl_imul_u16,
796 iemAImpl_imul_u32,
797 iemAImpl_imul_u64
798};
799
800/** Function table for the DIV instruction. */
801IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
802{
803 iemAImpl_div_u8,
804 iemAImpl_div_u16,
805 iemAImpl_div_u32,
806 iemAImpl_div_u64
807};
808
809/** Function table for the IDIV instruction. */
810IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
811{
812 iemAImpl_idiv_u8,
813 iemAImpl_idiv_u16,
814 iemAImpl_idiv_u32,
815 iemAImpl_idiv_u64
816};
817
818/** Function table for the SHLD instruction */
819IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
820{
821 iemAImpl_shld_u16,
822 iemAImpl_shld_u32,
823 iemAImpl_shld_u64,
824};
825
826/** Function table for the SHRD instruction */
827IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
828{
829 iemAImpl_shrd_u16,
830 iemAImpl_shrd_u32,
831 iemAImpl_shrd_u64,
832};
833
834
835/** Function table for the PUNPCKLBW instruction */
836IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
837/** Function table for the PUNPCKLWD instruction */
838IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
839/** Function table for the PUNPCKLDQ instruction */
840IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
841/** Function table for the PUNPCKLQDQ instruction */
842IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
843
844/** Function table for the PUNPCKHBW instruction */
845IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
846/** Function table for the PUNPCKHWD instruction */
847IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
848/** Function table for the PUNPCKHDQ instruction */
849IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
850/** Function table for the PUNPCKHQDQ instruction */
851IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
852
853/** Function table for the PXOR instruction */
854IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
855/** Function table for the PCMPEQB instruction */
856IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
857/** Function table for the PCMPEQW instruction */
858IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
859/** Function table for the PCMPEQD instruction */
860IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
861
862
863#if defined(IEM_LOG_MEMORY_WRITES)
864/** What IEM just wrote. */
865uint8_t g_abIemWrote[256];
866/** How much IEM just wrote. */
867size_t g_cbIemWrote;
868#endif
869
870
871/*********************************************************************************************************************************
872* Internal Functions *
873*********************************************************************************************************************************/
874IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
875IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
876IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
877IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
878/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
879IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
880IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
881IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
882IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
883IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
884IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
885IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
886IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
887IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
888IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
889IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
890IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
891#ifdef IEM_WITH_SETJMP
892DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
893DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
894DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
895DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
896DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
897#endif
898
899IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
900IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
901IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
902IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
903IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
904IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
905IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
906IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
907IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
908IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
909IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
910IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
911IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
912IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
913IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
914IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
915IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
916
917#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
918IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
919IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
920#endif
921
922
923/**
924 * Sets the pass up status.
925 *
926 * @returns VINF_SUCCESS.
927 * @param pVCpu The cross context virtual CPU structure of the
928 * calling thread.
929 * @param rcPassUp The pass up status. Must be informational.
930 * VINF_SUCCESS is not allowed.
931 */
932IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
933{
934 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
935
936 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
937 if (rcOldPassUp == VINF_SUCCESS)
938 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
939 /* If both are EM scheduling codes, use EM priority rules. */
940 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
941 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
942 {
943 if (rcPassUp < rcOldPassUp)
944 {
945 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
946 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
947 }
948 else
949 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
950 }
951 /* Override EM scheduling with specific status code. */
952 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
953 {
954 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
955 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
956 }
957 /* Don't override specific status code, first come first served. */
958 else
959 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
960 return VINF_SUCCESS;
961}
962
963
964/**
965 * Calculates the CPU mode.
966 *
967 * This is mainly for updating IEMCPU::enmCpuMode.
968 *
969 * @returns CPU mode.
970 * @param pVCpu The cross context virtual CPU structure of the
971 * calling thread.
972 */
973DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
974{
975 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
976 return IEMMODE_64BIT;
977 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
978 return IEMMODE_32BIT;
979 return IEMMODE_16BIT;
980}
981
982
983/**
984 * Initializes the execution state.
985 *
986 * @param pVCpu The cross context virtual CPU structure of the
987 * calling thread.
988 * @param fBypassHandlers Whether to bypass access handlers.
989 *
990 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
991 * side-effects in strict builds.
992 */
993DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
994{
995 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
996 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
997
998#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
999 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1000 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1001 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1002 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1003 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1004 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1005 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1006 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1007#endif
1008
1009#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1010 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1011#endif
1012 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1013 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1014#ifdef VBOX_STRICT
1015 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1016 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1017 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1018 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1019 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1020 pVCpu->iem.s.uRexReg = 127;
1021 pVCpu->iem.s.uRexB = 127;
1022 pVCpu->iem.s.offModRm = 127;
1023 pVCpu->iem.s.uRexIndex = 127;
1024 pVCpu->iem.s.iEffSeg = 127;
1025 pVCpu->iem.s.idxPrefix = 127;
1026 pVCpu->iem.s.uVex3rdReg = 127;
1027 pVCpu->iem.s.uVexLength = 127;
1028 pVCpu->iem.s.fEvexStuff = 127;
1029 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1030# ifdef IEM_WITH_CODE_TLB
1031 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1032 pVCpu->iem.s.pbInstrBuf = NULL;
1033 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1034 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1035 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1036 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1037# else
1038 pVCpu->iem.s.offOpcode = 127;
1039 pVCpu->iem.s.cbOpcode = 127;
1040# endif
1041#endif
1042
1043 pVCpu->iem.s.cActiveMappings = 0;
1044 pVCpu->iem.s.iNextMapping = 0;
1045 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1046 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1047#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1048 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1049 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1050 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1051 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1052 if (!pVCpu->iem.s.fInPatchCode)
1053 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1054#endif
1055}
1056
1057#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1058/**
1059 * Performs a minimal reinitialization of the execution state.
1060 *
1061 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
 1062 * 'world-switch' type operations on the CPU. Currently only nested
1063 * hardware-virtualization uses it.
1064 *
1065 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1066 */
1067IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1068{
1069 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1070 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1071
1072 pVCpu->iem.s.uCpl = uCpl;
1073 pVCpu->iem.s.enmCpuMode = enmMode;
1074 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1075 pVCpu->iem.s.enmEffAddrMode = enmMode;
1076 if (enmMode != IEMMODE_64BIT)
1077 {
1078 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1079 pVCpu->iem.s.enmEffOpSize = enmMode;
1080 }
1081 else
1082 {
1083 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1084 pVCpu->iem.s.enmEffOpSize = enmMode;
1085 }
1086 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1087#ifndef IEM_WITH_CODE_TLB
1088 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1089 pVCpu->iem.s.offOpcode = 0;
1090 pVCpu->iem.s.cbOpcode = 0;
1091#endif
1092 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1093}
1094#endif
1095
1096/**
1097 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1098 *
1099 * @param pVCpu The cross context virtual CPU structure of the
1100 * calling thread.
1101 */
1102DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1103{
1104 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1105#ifdef VBOX_STRICT
1106# ifdef IEM_WITH_CODE_TLB
1107 NOREF(pVCpu);
1108# else
1109 pVCpu->iem.s.cbOpcode = 0;
1110# endif
1111#else
1112 NOREF(pVCpu);
1113#endif
1114}
1115
1116
1117/**
1118 * Initializes the decoder state.
1119 *
1120 * iemReInitDecoder is mostly a copy of this function.
1121 *
1122 * @param pVCpu The cross context virtual CPU structure of the
1123 * calling thread.
1124 * @param fBypassHandlers Whether to bypass access handlers.
1125 */
1126DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1127{
1128 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1129 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1130
1131#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1132 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1133 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1134 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1135 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1136 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1137 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1138 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1139 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1140#endif
1141
1142#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1143 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1144#endif
1145 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1146 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1147 pVCpu->iem.s.enmCpuMode = enmMode;
1148 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1149 pVCpu->iem.s.enmEffAddrMode = enmMode;
1150 if (enmMode != IEMMODE_64BIT)
1151 {
1152 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1153 pVCpu->iem.s.enmEffOpSize = enmMode;
1154 }
1155 else
1156 {
1157 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1158 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1159 }
1160 pVCpu->iem.s.fPrefixes = 0;
1161 pVCpu->iem.s.uRexReg = 0;
1162 pVCpu->iem.s.uRexB = 0;
1163 pVCpu->iem.s.uRexIndex = 0;
1164 pVCpu->iem.s.idxPrefix = 0;
1165 pVCpu->iem.s.uVex3rdReg = 0;
1166 pVCpu->iem.s.uVexLength = 0;
1167 pVCpu->iem.s.fEvexStuff = 0;
1168 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1169#ifdef IEM_WITH_CODE_TLB
1170 pVCpu->iem.s.pbInstrBuf = NULL;
1171 pVCpu->iem.s.offInstrNextByte = 0;
1172 pVCpu->iem.s.offCurInstrStart = 0;
1173# ifdef VBOX_STRICT
1174 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1175 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1176 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1177# endif
1178#else
1179 pVCpu->iem.s.offOpcode = 0;
1180 pVCpu->iem.s.cbOpcode = 0;
1181#endif
1182 pVCpu->iem.s.offModRm = 0;
1183 pVCpu->iem.s.cActiveMappings = 0;
1184 pVCpu->iem.s.iNextMapping = 0;
1185 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1186 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1187#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1188 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1189 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1190 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1191 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1192 if (!pVCpu->iem.s.fInPatchCode)
1193 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1194#endif
1195
1196#ifdef DBGFTRACE_ENABLED
1197 switch (enmMode)
1198 {
1199 case IEMMODE_64BIT:
1200 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1201 break;
1202 case IEMMODE_32BIT:
1203 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1204 break;
1205 case IEMMODE_16BIT:
1206 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1207 break;
1208 }
1209#endif
1210}
1211
1212
1213/**
1214 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1215 *
1216 * This is mostly a copy of iemInitDecoder.
1217 *
1218 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1219 */
1220DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1221{
1222 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1223
1224#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1225 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1226 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1227 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1228 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1229 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1230 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1231 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1232 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1233#endif
1234
1235 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1236 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1237 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1238 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1239 pVCpu->iem.s.enmEffAddrMode = enmMode;
1240 if (enmMode != IEMMODE_64BIT)
1241 {
1242 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1243 pVCpu->iem.s.enmEffOpSize = enmMode;
1244 }
1245 else
1246 {
1247 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1248 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1249 }
1250 pVCpu->iem.s.fPrefixes = 0;
1251 pVCpu->iem.s.uRexReg = 0;
1252 pVCpu->iem.s.uRexB = 0;
1253 pVCpu->iem.s.uRexIndex = 0;
1254 pVCpu->iem.s.idxPrefix = 0;
1255 pVCpu->iem.s.uVex3rdReg = 0;
1256 pVCpu->iem.s.uVexLength = 0;
1257 pVCpu->iem.s.fEvexStuff = 0;
1258 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1259#ifdef IEM_WITH_CODE_TLB
1260 if (pVCpu->iem.s.pbInstrBuf)
1261 {
1262 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1263 - pVCpu->iem.s.uInstrBufPc;
1264 if (off < pVCpu->iem.s.cbInstrBufTotal)
1265 {
1266 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1267 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1268 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1269 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1270 else
1271 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1272 }
1273 else
1274 {
1275 pVCpu->iem.s.pbInstrBuf = NULL;
1276 pVCpu->iem.s.offInstrNextByte = 0;
1277 pVCpu->iem.s.offCurInstrStart = 0;
1278 pVCpu->iem.s.cbInstrBuf = 0;
1279 pVCpu->iem.s.cbInstrBufTotal = 0;
1280 }
1281 }
1282 else
1283 {
1284 pVCpu->iem.s.offInstrNextByte = 0;
1285 pVCpu->iem.s.offCurInstrStart = 0;
1286 pVCpu->iem.s.cbInstrBuf = 0;
1287 pVCpu->iem.s.cbInstrBufTotal = 0;
1288 }
1289#else
1290 pVCpu->iem.s.cbOpcode = 0;
1291 pVCpu->iem.s.offOpcode = 0;
1292#endif
1293 pVCpu->iem.s.offModRm = 0;
1294 Assert(pVCpu->iem.s.cActiveMappings == 0);
1295 pVCpu->iem.s.iNextMapping = 0;
1296 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1297 Assert(pVCpu->iem.s.fBypassHandlers == false);
1298#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1299 if (!pVCpu->iem.s.fInPatchCode)
1300 { /* likely */ }
1301 else
1302 {
1303 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1304 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1305 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1306 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1307 if (!pVCpu->iem.s.fInPatchCode)
1308 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1309 }
1310#endif
1311
1312#ifdef DBGFTRACE_ENABLED
1313 switch (enmMode)
1314 {
1315 case IEMMODE_64BIT:
1316 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1317 break;
1318 case IEMMODE_32BIT:
1319 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1320 break;
1321 case IEMMODE_16BIT:
1322 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1323 break;
1324 }
1325#endif
1326}
1327
1328
1329
1330/**
1331 * Prefetch opcodes the first time when starting executing.
1332 *
1333 * @returns Strict VBox status code.
1334 * @param pVCpu The cross context virtual CPU structure of the
1335 * calling thread.
1336 * @param fBypassHandlers Whether to bypass access handlers.
1337 */
1338IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1339{
1340 iemInitDecoder(pVCpu, fBypassHandlers);
1341
1342#ifdef IEM_WITH_CODE_TLB
1343 /** @todo Do ITLB lookup here. */
1344
1345#else /* !IEM_WITH_CODE_TLB */
1346
1347 /*
1348 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1349 *
1350 * First translate CS:rIP to a physical address.
1351 */
1352 uint32_t cbToTryRead;
1353 RTGCPTR GCPtrPC;
1354 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1355 {
1356 cbToTryRead = PAGE_SIZE;
1357 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1358 if (IEM_IS_CANONICAL(GCPtrPC))
1359 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1360 else
1361 return iemRaiseGeneralProtectionFault0(pVCpu);
1362 }
1363 else
1364 {
1365 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1366 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1367 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1368 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1369 else
1370 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1371 if (cbToTryRead) { /* likely */ }
1372 else /* overflowed */
1373 {
1374 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1375 cbToTryRead = UINT32_MAX;
1376 }
1377 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1378 Assert(GCPtrPC <= UINT32_MAX);
1379 }
1380
1381# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1382 /* Allow interpretation of patch manager code blocks since they can for
1383 instance throw #PFs for perfectly good reasons. */
1384 if (pVCpu->iem.s.fInPatchCode)
1385 {
1386 size_t cbRead = 0;
1387 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1388 AssertRCReturn(rc, rc);
1389 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1390 return VINF_SUCCESS;
1391 }
1392# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1393
1394 RTGCPHYS GCPhys;
1395 uint64_t fFlags;
1396 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1397 if (RT_SUCCESS(rc)) { /* probable */ }
1398 else
1399 {
1400 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1401 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1402 }
1403 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1404 else
1405 {
1406 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1407 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1408 }
1409 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1410 else
1411 {
1412 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1413 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1414 }
1415 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1416 /** @todo Check reserved bits and such stuff. PGM is better at doing
1417 * that, so do it when implementing the guest virtual address
1418 * TLB... */
1419
1420 /*
1421 * Read the bytes at this address.
1422 */
1423 PVM pVM = pVCpu->CTX_SUFF(pVM);
1424# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1425 size_t cbActual;
1426 if ( PATMIsEnabled(pVM)
1427 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1428 {
1429 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1430 Assert(cbActual > 0);
1431 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1432 }
1433 else
1434# endif
1435 {
1436 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1437 if (cbToTryRead > cbLeftOnPage)
1438 cbToTryRead = cbLeftOnPage;
1439 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1440 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1441
1442 if (!pVCpu->iem.s.fBypassHandlers)
1443 {
1444 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1445 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1446 { /* likely */ }
1447 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1448 {
1449 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
 1450 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1451 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1452 }
1453 else
1454 {
1455 Log((RT_SUCCESS(rcStrict)
1456 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1457 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
 1458 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1459 return rcStrict;
1460 }
1461 }
1462 else
1463 {
1464 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1465 if (RT_SUCCESS(rc))
1466 { /* likely */ }
1467 else
1468 {
1469 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1470                      GCPtrPC, GCPhys, cbToTryRead, rc));
1471 return rc;
1472 }
1473 }
1474 pVCpu->iem.s.cbOpcode = cbToTryRead;
1475 }
1476#endif /* !IEM_WITH_CODE_TLB */
1477 return VINF_SUCCESS;
1478}
1479
1480
1481/**
1482 * Invalidates the IEM TLBs.
1483 *
1484 * This is called internally as well as by PGM when moving GC mappings.
1485 *
1487 * @param pVCpu The cross context virtual CPU structure of the calling
1488 * thread.
1489 * @param fVmm Set when PGM calls us with a remapping.
1490 */
1491VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1492{
1493#ifdef IEM_WITH_CODE_TLB
1494 pVCpu->iem.s.cbInstrBufTotal = 0;
1495 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1496 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1497 { /* very likely */ }
1498 else
1499 {
1500 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1501 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1502 while (i-- > 0)
1503 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1504 }
1505#endif
1506
1507#ifdef IEM_WITH_DATA_TLB
1508 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1509 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1510 { /* very likely */ }
1511 else
1512 {
1513 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1514 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1515 while (i-- > 0)
1516 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1517 }
1518#endif
1519 NOREF(pVCpu); NOREF(fVmm);
1520}
1521
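/*
 * Illustrative sketch (not part of the build): the lazy flush above works
 * because every entry tag embeds the revision that was current when the entry
 * was inserted, so bumping the revision makes all old tags mismatch without
 * touching the array.  The names below (MYTLB, myTlbIsHit, myTlbFlushAll) are
 * hypothetical, simplified stand-ins for the real IEMTLB/IEMTLBENTRY layout:
 *
 *      typedef struct MYTLBENTRY { uint64_t uTag; } MYTLBENTRY;
 *      typedef struct MYTLB { uint64_t uTlbRevision; MYTLBENTRY aEntries[256]; } MYTLB;
 *
 *      static bool myTlbIsHit(MYTLB const *pTlb, uint64_t GCPtr)
 *      {
 *          uint64_t const uTag = (GCPtr >> X86_PAGE_SHIFT) | pTlb->uTlbRevision;
 *          return pTlb->aEntries[(uint8_t)uTag].uTag == uTag;  // stale revision => miss
 *      }
 *
 *      static void myTlbFlushAll(MYTLB *pTlb)
 *      {
 *          pTlb->uTlbRevision += IEMTLB_REVISION_INCR;         // O(1) in the common case
 *          if (!pTlb->uTlbRevision)                            // wrapped around: scrub once
 *          {
 *              pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
 *              for (unsigned i = 0; i < RT_ELEMENTS(pTlb->aEntries); i++)
 *                  pTlb->aEntries[i].uTag = 0;
 *          }
 *      }
 */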
1522
1523/**
1524 * Invalidates a page in the TLBs.
1525 *
1526 * @param pVCpu The cross context virtual CPU structure of the calling
1527 * thread.
1528 * @param   GCPtr               The address of the page to invalidate.
1529 */
1530VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1531{
1532#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1533 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1534 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1535 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1536 uintptr_t idx = (uint8_t)GCPtr;
1537
1538# ifdef IEM_WITH_CODE_TLB
1539 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1540 {
1541 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1542 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1543 pVCpu->iem.s.cbInstrBufTotal = 0;
1544 }
1545# endif
1546
1547# ifdef IEM_WITH_DATA_TLB
1548 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1549 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1550# endif
1551#else
1552 NOREF(pVCpu); NOREF(GCPtr);
1553#endif
1554}
1555
1556
1557/**
1558 * Invalidates the host physical aspects of the IEM TLBs.
1559 *
1560 * This is called internally as well as by PGM when moving GC mappings.
1561 *
1562 * @param pVCpu The cross context virtual CPU structure of the calling
1563 * thread.
1564 */
1565VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1566{
1567#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1568     /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1569
1570# ifdef IEM_WITH_CODE_TLB
1571 pVCpu->iem.s.cbInstrBufTotal = 0;
1572# endif
1573 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1574 if (uTlbPhysRev != 0)
1575 {
1576 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1577 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1578 }
1579 else
1580 {
1581 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1582 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1583
1584 unsigned i;
1585# ifdef IEM_WITH_CODE_TLB
1586 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1587 while (i-- > 0)
1588 {
1589 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1590 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1591 }
1592# endif
1593# ifdef IEM_WITH_DATA_TLB
1594 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1595 while (i-- > 0)
1596 {
1597 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1598 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1599 }
1600# endif
1601 }
1602#else
1603 NOREF(pVCpu);
1604#endif
1605}
1606
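/*
 * Illustrative sketch (not part of the build): the physical side uses the same
 * revision trick, but the revision shares the fFlagsAndPhysRev field with the
 * per-page flag bits, so a single masked compare tells whether the cached host
 * mapping info is still current (this is the check iemOpcodeFetchBytesJmp
 * below performs before re-querying PGM):
 *
 *      bool const fPhysInfoStale =    (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV)
 *                                  != pVCpu->iem.s.CodeTlb.uTlbPhysRev;
 */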
1607
1608/**
1609 * Invalidates the host physical aspects of the IEM TLBs.
1610 *
1611 * This is called internally as well as by PGM when moving GC mappings.
1612 *
1613 * @param pVM The cross context VM structure.
1614 *
1615 * @remarks Caller holds the PGM lock.
1616 */
1617VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1618{
1619 RT_NOREF_PV(pVM);
1620}
1621
1622#ifdef IEM_WITH_CODE_TLB
1623
1624/**
1625 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception
1626 * and longjmp'ing on failure.
1627 *
1628 * We end up here for a number of reasons:
1629 * - pbInstrBuf isn't yet initialized.
1630 *   - Advancing beyond the buffer boundary (e.g. cross page).
1631 * - Advancing beyond the CS segment limit.
1632 * - Fetching from non-mappable page (e.g. MMIO).
1633 *
1634 * @param pVCpu The cross context virtual CPU structure of the
1635 * calling thread.
1636 * @param pvDst Where to return the bytes.
1637 * @param cbDst Number of bytes to read.
1638 *
1639 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1640 */
1641IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1642{
1643#ifdef IN_RING3
1644 for (;;)
1645 {
1646 Assert(cbDst <= 8);
1647 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1648
1649 /*
1650 * We might have a partial buffer match, deal with that first to make the
1651 * rest simpler. This is the first part of the cross page/buffer case.
1652 */
1653 if (pVCpu->iem.s.pbInstrBuf != NULL)
1654 {
1655 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1656 {
1657 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1658 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1659 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1660
1661 cbDst -= cbCopy;
1662 pvDst = (uint8_t *)pvDst + cbCopy;
1663 offBuf += cbCopy;
1664                 pVCpu->iem.s.offInstrNextByte += cbCopy;
1665 }
1666 }
1667
1668 /*
1669 * Check segment limit, figuring how much we're allowed to access at this point.
1670 *
1671 * We will fault immediately if RIP is past the segment limit / in non-canonical
1672 * territory. If we do continue, there are one or more bytes to read before we
1673 * end up in trouble and we need to do that first before faulting.
1674 */
1675 RTGCPTR GCPtrFirst;
1676 uint32_t cbMaxRead;
1677 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1678 {
1679 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1680 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1681 { /* likely */ }
1682 else
1683 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1684 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1685 }
1686 else
1687 {
1688 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1689 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1690 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1691 { /* likely */ }
1692 else
1693 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1694 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1695 if (cbMaxRead != 0)
1696 { /* likely */ }
1697 else
1698 {
1699 /* Overflowed because address is 0 and limit is max. */
1700 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1701 cbMaxRead = X86_PAGE_SIZE;
1702 }
1703 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1704 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1705 if (cbMaxRead2 < cbMaxRead)
1706 cbMaxRead = cbMaxRead2;
1707 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1708 }
1709
1710 /*
1711 * Get the TLB entry for this piece of code.
1712 */
1713 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1714 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1715 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1716 if (pTlbe->uTag == uTag)
1717 {
1718 /* likely when executing lots of code, otherwise unlikely */
1719# ifdef VBOX_WITH_STATISTICS
1720 pVCpu->iem.s.CodeTlb.cTlbHits++;
1721# endif
1722 }
1723 else
1724 {
1725 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1726# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1727 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1728 {
1729 pTlbe->uTag = uTag;
1730 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1731                                         | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1732 pTlbe->GCPhys = NIL_RTGCPHYS;
1733 pTlbe->pbMappingR3 = NULL;
1734 }
1735 else
1736# endif
1737 {
1738 RTGCPHYS GCPhys;
1739 uint64_t fFlags;
1740 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1741 if (RT_FAILURE(rc))
1742 {
1743                 Log(("iemOpcodeFetchBytesJmp: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1744 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1745 }
1746
1747 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1748 pTlbe->uTag = uTag;
1749 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1750 pTlbe->GCPhys = GCPhys;
1751 pTlbe->pbMappingR3 = NULL;
1752 }
1753 }
1754
1755 /*
1756 * Check TLB page table level access flags.
1757 */
1758 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1759 {
1760 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1761 {
1762 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1763 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1764 }
1765 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1766 {
1767                 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1768 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1769 }
1770 }
1771
1772# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1773 /*
1774 * Allow interpretation of patch manager code blocks since they can for
1775 * instance throw #PFs for perfectly good reasons.
1776 */
1777 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1778         { /* not unlikely */ }
1779 else
1780 {
1781         /** @todo Could optimize this a little in ring-3 if we liked. */
1782 size_t cbRead = 0;
1783 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1784 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1785 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1786 return;
1787 }
1788# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1789
1790 /*
1791 * Look up the physical page info if necessary.
1792 */
1793 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1794 { /* not necessary */ }
1795 else
1796 {
1797 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1798 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1799 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1800 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1801 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1802 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1803 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1804 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1805 }
1806
1807# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1808 /*
1809 * Try do a direct read using the pbMappingR3 pointer.
1810 */
1811 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1812 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1813 {
1814 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1815 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1816 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1817 {
1818 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1819 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1820 }
1821 else
1822 {
1823 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1824 Assert(cbInstr < cbMaxRead);
1825 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1826 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1827 }
1828 if (cbDst <= cbMaxRead)
1829 {
1830 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1831 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1832 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1833 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1834 return;
1835 }
1836 pVCpu->iem.s.pbInstrBuf = NULL;
1837
1838 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1839 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1840 }
1841 else
1842# endif
1843#if 0
1844 /*
1845      * If there is no special read handling, we can read a bit more and
1846 * put it in the prefetch buffer.
1847 */
1848 if ( cbDst < cbMaxRead
1849 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1850 {
1851 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1852 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1853 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1854 { /* likely */ }
1855 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1856 {
1857 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1858                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1859             rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1860             AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1861 }
1862 else
1863 {
1864 Log((RT_SUCCESS(rcStrict)
1865 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1866 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1867                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1868 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1869 }
1870 }
1871 /*
1872 * Special read handling, so only read exactly what's needed.
1873 * This is a highly unlikely scenario.
1874 */
1875 else
1876#endif
1877 {
1878 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1879 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1880 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1881 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1882 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1883 { /* likely */ }
1884 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1885 {
1886             Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1887                  GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1888 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1889 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1890 }
1891 else
1892 {
1893 Log((RT_SUCCESS(rcStrict)
1894                  ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1895                  : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1896                  GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1897 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1898 }
1899 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1900 if (cbToRead == cbDst)
1901 return;
1902 }
1903
1904 /*
1905 * More to read, loop.
1906 */
1907 cbDst -= cbMaxRead;
1908 pvDst = (uint8_t *)pvDst + cbMaxRead;
1909 }
1910#else
1911 RT_NOREF(pvDst, cbDst);
1912 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1913#endif
1914}
1915
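/*
 * Illustrative sketch (not part of the build): how the loop above splits a
 * fetch that straddles a page boundary.  Assume a 4 byte immediate whose first
 * byte sits 2 bytes before the end of a page (the numbers are made up for the
 * example):
 *
 *      1st iteration: cbMaxRead = 2 (bytes left on the page), 2 bytes are
 *                     copied, then the loop tail does cbDst = 4 - 2 = 2 and
 *                     pvDst += 2 before going around again.
 *      2nd iteration: GCPtrFirst is re-derived from the advanced
 *                     offInstrNextByte and now points at the next page, so the
 *                     remaining 2 bytes are read and the function returns.
 */
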
1916#else
1917
1918/**
1919 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1920 * exception if it fails.
1921 *
1922 * @returns Strict VBox status code.
1923 * @param pVCpu The cross context virtual CPU structure of the
1924 * calling thread.
1925 * @param   cbMin               The minimum number of bytes relative to offOpcode
1926 * that must be read.
1927 */
1928IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1929{
1930 /*
1931 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1932 *
1933 * First translate CS:rIP to a physical address.
1934 */
1935 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1936 uint32_t cbToTryRead;
1937 RTGCPTR GCPtrNext;
1938 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1939 {
1940 cbToTryRead = PAGE_SIZE;
1941 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1942 if (!IEM_IS_CANONICAL(GCPtrNext))
1943 return iemRaiseGeneralProtectionFault0(pVCpu);
1944 }
1945 else
1946 {
1947 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1948 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1949 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1950 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1951 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1952 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1953 if (!cbToTryRead) /* overflowed */
1954 {
1955 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1956 cbToTryRead = UINT32_MAX;
1957 /** @todo check out wrapping around the code segment. */
1958 }
1959 if (cbToTryRead < cbMin - cbLeft)
1960 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1961 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1962 }
1963
1964 /* Only read up to the end of the page, and make sure we don't read more
1965 than the opcode buffer can hold. */
1966 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1967 if (cbToTryRead > cbLeftOnPage)
1968 cbToTryRead = cbLeftOnPage;
1969 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1970 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1971/** @todo r=bird: Convert assertion into undefined opcode exception? */
1972 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1973
1974# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1975 /* Allow interpretation of patch manager code blocks since they can for
1976 instance throw #PFs for perfectly good reasons. */
1977 if (pVCpu->iem.s.fInPatchCode)
1978 {
1979 size_t cbRead = 0;
1980 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
1981 AssertRCReturn(rc, rc);
1982 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1983 return VINF_SUCCESS;
1984 }
1985# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1986
1987 RTGCPHYS GCPhys;
1988 uint64_t fFlags;
1989 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1990 if (RT_FAILURE(rc))
1991 {
1992 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1993 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1994 }
1995 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1996 {
1997 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1998 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1999 }
2000 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2001 {
2002 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2003 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2004 }
2005 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2006 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2007 /** @todo Check reserved bits and such stuff. PGM is better at doing
2008 * that, so do it when implementing the guest virtual address
2009 * TLB... */
2010
2011 /*
2012 * Read the bytes at this address.
2013 *
2014 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2015 * and since PATM should only patch the start of an instruction there
2016 * should be no need to check again here.
2017 */
2018 if (!pVCpu->iem.s.fBypassHandlers)
2019 {
2020 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2021 cbToTryRead, PGMACCESSORIGIN_IEM);
2022 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2023 { /* likely */ }
2024 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2025 {
2026 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2027                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2028 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2029 }
2030 else
2031 {
2032 Log((RT_SUCCESS(rcStrict)
2033 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2034 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2035                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2036 return rcStrict;
2037 }
2038 }
2039 else
2040 {
2041 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2042 if (RT_SUCCESS(rc))
2043 { /* likely */ }
2044 else
2045 {
2046 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2047 return rc;
2048 }
2049 }
2050 pVCpu->iem.s.cbOpcode += cbToTryRead;
2051 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2052
2053 return VINF_SUCCESS;
2054}
2055
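/*
 * Illustrative worked example (not part of the build) for the segment limit
 * arithmetic above: with a 64 KiB code segment (cs.u32Limit = 0xffff) and
 * eip = 0xfffe, cbToTryRead = 0xffff - 0xfffe + 1 = 2, i.e. only two more
 * bytes may be fetched before iemRaiseSelectorBounds kicks in.  The special
 * case is u32Limit = UINT32_MAX with eip = 0: the expression wraps to 0, which
 * the code detects and treats as "no practical limit" (UINT32_MAX), leaving the
 * page-end and opcode-buffer clamps to bound the actual read.
 */
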
2056#endif /* !IEM_WITH_CODE_TLB */
2057#ifndef IEM_WITH_SETJMP
2058
2059/**
2060 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2061 *
2062 * @returns Strict VBox status code.
2063 * @param pVCpu The cross context virtual CPU structure of the
2064 * calling thread.
2065 * @param pb Where to return the opcode byte.
2066 */
2067DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2068{
2069 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2070 if (rcStrict == VINF_SUCCESS)
2071 {
2072 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2073 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2074 pVCpu->iem.s.offOpcode = offOpcode + 1;
2075 }
2076 else
2077 *pb = 0;
2078 return rcStrict;
2079}
2080
2081
2082/**
2083 * Fetches the next opcode byte.
2084 *
2085 * @returns Strict VBox status code.
2086 * @param pVCpu The cross context virtual CPU structure of the
2087 * calling thread.
2088 * @param pu8 Where to return the opcode byte.
2089 */
2090DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2091{
2092 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2093 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2094 {
2095 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2096 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2097 return VINF_SUCCESS;
2098 }
2099 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2100}
2101
2102#else /* IEM_WITH_SETJMP */
2103
2104/**
2105 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2106 *
2107 * @returns The opcode byte.
2108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2109 */
2110DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2111{
2112# ifdef IEM_WITH_CODE_TLB
2113 uint8_t u8;
2114 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2115 return u8;
2116# else
2117 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2118 if (rcStrict == VINF_SUCCESS)
2119 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2120 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2121# endif
2122}
2123
2124
2125/**
2126 * Fetches the next opcode byte, longjmp on error.
2127 *
2128 * @returns The opcode byte.
2129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2130 */
2131DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2132{
2133# ifdef IEM_WITH_CODE_TLB
2134 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2135 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2136 if (RT_LIKELY( pbBuf != NULL
2137 && offBuf < pVCpu->iem.s.cbInstrBuf))
2138 {
2139 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2140 return pbBuf[offBuf];
2141 }
2142# else
2143 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2144 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2145 {
2146 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2147 return pVCpu->iem.s.abOpcode[offOpcode];
2148 }
2149# endif
2150 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2151}
2152
2153#endif /* IEM_WITH_SETJMP */
2154
2155/**
2156 * Fetches the next opcode byte, returns automatically on failure.
2157 *
2158 * @param a_pu8 Where to return the opcode byte.
2159 * @remark Implicitly references pVCpu.
2160 */
2161#ifndef IEM_WITH_SETJMP
2162# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2163 do \
2164 { \
2165 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2166 if (rcStrict2 == VINF_SUCCESS) \
2167 { /* likely */ } \
2168 else \
2169 return rcStrict2; \
2170 } while (0)
2171#else
2172# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2173#endif /* IEM_WITH_SETJMP */
2174
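/*
 * Illustrative sketch (not part of the build): how a decoder helper is
 * expected to use IEM_OPCODE_GET_NEXT_U8.  The handler name below is
 * hypothetical; the point is that in the non-setjmp build the macro expands to
 * a 'return rcStrict2' on failure, so it may only be used in functions that
 * return VBOXSTRICTRC, while in the setjmp build it longjmps instead:
 *
 *      IEM_STATIC VBOXSTRICTRC iemOpExample_Imm8(PVMCPU pVCpu)
 *      {
 *          uint8_t bImm;
 *          IEM_OPCODE_GET_NEXT_U8(&bImm);  // returns/longjmps on fetch failure
 *          Log4(("example: imm8=%#x\n", bImm));
 *          return VINF_SUCCESS;
 *      }
 */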
2175
2176#ifndef IEM_WITH_SETJMP
2177/**
2178 * Fetches the next signed byte from the opcode stream.
2179 *
2180 * @returns Strict VBox status code.
2181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2182 * @param pi8 Where to return the signed byte.
2183 */
2184DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2185{
2186 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2187}
2188#endif /* !IEM_WITH_SETJMP */
2189
2190
2191/**
2192 * Fetches the next signed byte from the opcode stream, returning automatically
2193 * on failure.
2194 *
2195 * @param a_pi8 Where to return the signed byte.
2196 * @remark Implicitly references pVCpu.
2197 */
2198#ifndef IEM_WITH_SETJMP
2199# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2200 do \
2201 { \
2202 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2203 if (rcStrict2 != VINF_SUCCESS) \
2204 return rcStrict2; \
2205 } while (0)
2206#else /* IEM_WITH_SETJMP */
2207# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2208
2209#endif /* IEM_WITH_SETJMP */
2210
2211#ifndef IEM_WITH_SETJMP
2212
2213/**
2214 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2215 *
2216 * @returns Strict VBox status code.
2217 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2218 * @param   pu16                Where to return the opcode word.
2219 */
2220DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2221{
2222 uint8_t u8;
2223 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2224 if (rcStrict == VINF_SUCCESS)
2225 *pu16 = (int8_t)u8;
2226 return rcStrict;
2227}
2228
2229
2230/**
2231 * Fetches the next signed byte from the opcode stream, extending it to
2232 * unsigned 16-bit.
2233 *
2234 * @returns Strict VBox status code.
2235 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2236 * @param pu16 Where to return the unsigned word.
2237 */
2238DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2239{
2240 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2241 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2242 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2243
2244 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2245 pVCpu->iem.s.offOpcode = offOpcode + 1;
2246 return VINF_SUCCESS;
2247}
2248
2249#endif /* !IEM_WITH_SETJMP */
2250
2251/**
2252 * Fetches the next signed byte from the opcode stream, sign-extending it to
2253 * a word, returning automatically on failure.
2254 *
2255 * @param a_pu16 Where to return the word.
2256 * @remark Implicitly references pVCpu.
2257 */
2258#ifndef IEM_WITH_SETJMP
2259# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2260 do \
2261 { \
2262 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2263 if (rcStrict2 != VINF_SUCCESS) \
2264 return rcStrict2; \
2265 } while (0)
2266#else
2267# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2268#endif
2269
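/*
 * Illustrative worked example (not part of the build) for this and the
 * following S8_SX_U32/U64 fetchers: the sign extension is done entirely by the
 * (int8_t) cast plus the implicit widening in the assignment, e.g. for the
 * opcode byte 0x9c (-100):
 *
 *      uint8_t  const b   = 0x9c;
 *      uint16_t const u16 = (int8_t)b;     // 0xff9c
 *      uint32_t const u32 = (int8_t)b;     // 0xffffff9c
 *      uint64_t const u64 = (int8_t)b;     // 0xffffffffffffff9c
 */
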
2270#ifndef IEM_WITH_SETJMP
2271
2272/**
2273 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2274 *
2275 * @returns Strict VBox status code.
2276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2277 * @param pu32 Where to return the opcode dword.
2278 */
2279DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2280{
2281 uint8_t u8;
2282 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2283 if (rcStrict == VINF_SUCCESS)
2284 *pu32 = (int8_t)u8;
2285 return rcStrict;
2286}
2287
2288
2289/**
2290 * Fetches the next signed byte from the opcode stream, extending it to
2291 * unsigned 32-bit.
2292 *
2293 * @returns Strict VBox status code.
2294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2295 * @param pu32 Where to return the unsigned dword.
2296 */
2297DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2298{
2299 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2300 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2301 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2302
2303 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2304 pVCpu->iem.s.offOpcode = offOpcode + 1;
2305 return VINF_SUCCESS;
2306}
2307
2308#endif /* !IEM_WITH_SETJMP */
2309
2310/**
2311 * Fetches the next signed byte from the opcode stream, sign-extending it to
2312 * a double word, returning automatically on failure.
2313 *
2314 * @param   a_pu32              Where to return the double word.
2315 * @remark Implicitly references pVCpu.
2316 */
2317#ifndef IEM_WITH_SETJMP
2318#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2319 do \
2320 { \
2321 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2322 if (rcStrict2 != VINF_SUCCESS) \
2323 return rcStrict2; \
2324 } while (0)
2325#else
2326# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2327#endif
2328
2329#ifndef IEM_WITH_SETJMP
2330
2331/**
2332 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2333 *
2334 * @returns Strict VBox status code.
2335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2336 * @param pu64 Where to return the opcode qword.
2337 */
2338DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2339{
2340 uint8_t u8;
2341 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2342 if (rcStrict == VINF_SUCCESS)
2343 *pu64 = (int8_t)u8;
2344 return rcStrict;
2345}
2346
2347
2348/**
2349 * Fetches the next signed byte from the opcode stream, extending it to
2350 * unsigned 64-bit.
2351 *
2352 * @returns Strict VBox status code.
2353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2354 * @param pu64 Where to return the unsigned qword.
2355 */
2356DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2357{
2358 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2359 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2360 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2361
2362 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2363 pVCpu->iem.s.offOpcode = offOpcode + 1;
2364 return VINF_SUCCESS;
2365}
2366
2367#endif /* !IEM_WITH_SETJMP */
2368
2369
2370/**
2371 * Fetches the next signed byte from the opcode stream, sign-extending it to
2372 * a quad word, returning automatically on failure.
2373 *
2374 * @param   a_pu64              Where to return the quad word.
2375 * @remark Implicitly references pVCpu.
2376 */
2377#ifndef IEM_WITH_SETJMP
2378# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2379 do \
2380 { \
2381 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2382 if (rcStrict2 != VINF_SUCCESS) \
2383 return rcStrict2; \
2384 } while (0)
2385#else
2386# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2387#endif
2388
2389
2390#ifndef IEM_WITH_SETJMP
2391/**
2392 * Fetches the next opcode byte.
2393 *
2394 * @returns Strict VBox status code.
2395 * @param pVCpu The cross context virtual CPU structure of the
2396 * calling thread.
2397 * @param pu8 Where to return the opcode byte.
2398 */
2399DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2400{
2401 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2402 pVCpu->iem.s.offModRm = offOpcode;
2403 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2404 {
2405 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2406 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2407 return VINF_SUCCESS;
2408 }
2409 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2410}
2411#else /* IEM_WITH_SETJMP */
2412/**
2413 * Fetches the next opcode byte, which is a ModR/M byte, noting down its position; longjmp on error.
2414 *
2415 * @returns The opcode byte.
2416 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2417 */
2418DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2419{
2420# ifdef IEM_WITH_CODE_TLB
2421 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2422 pVCpu->iem.s.offModRm = offBuf;
2423 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2424 if (RT_LIKELY( pbBuf != NULL
2425 && offBuf < pVCpu->iem.s.cbInstrBuf))
2426 {
2427 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2428 return pbBuf[offBuf];
2429 }
2430# else
2431 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2432 pVCpu->iem.s.offModRm = offOpcode;
2433 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2434 {
2435 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2436 return pVCpu->iem.s.abOpcode[offOpcode];
2437 }
2438# endif
2439 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2440}
2441#endif /* IEM_WITH_SETJMP */
2442
2443/**
2444 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2445 * on failure.
2446 *
2447 * Will note down the position of the ModR/M byte for VT-x exits.
2448 *
2449 * @param a_pbRm Where to return the RM opcode byte.
2450 * @remark Implicitly references pVCpu.
2451 */
2452#ifndef IEM_WITH_SETJMP
2453# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2454 do \
2455 { \
2456 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2457 if (rcStrict2 == VINF_SUCCESS) \
2458 { /* likely */ } \
2459 else \
2460 return rcStrict2; \
2461 } while (0)
2462#else
2463# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2464#endif /* IEM_WITH_SETJMP */
2465
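/*
 * Illustrative sketch (not part of the build): typical ModR/M handling in an
 * opcode handler.  The field extraction is written out by hand here using the
 * standard x86 encoding (mod in bits 7:6, reg in 5:3, r/m in 2:0); REX
 * adjustments are left out:
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_RM(&bRm);        // also records offModRm for VT-x
 *      uint8_t const iMod = bRm >> 6;
 *      uint8_t const iReg = (bRm >> 3) & 7;
 *      uint8_t const iRm  = bRm & 7;
 */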
2466
2467#ifndef IEM_WITH_SETJMP
2468
2469/**
2470 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2471 *
2472 * @returns Strict VBox status code.
2473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2474 * @param pu16 Where to return the opcode word.
2475 */
2476DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2477{
2478 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2479 if (rcStrict == VINF_SUCCESS)
2480 {
2481 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2482# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2483 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2484# else
2485 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2486# endif
2487 pVCpu->iem.s.offOpcode = offOpcode + 2;
2488 }
2489 else
2490 *pu16 = 0;
2491 return rcStrict;
2492}
2493
2494
2495/**
2496 * Fetches the next opcode word.
2497 *
2498 * @returns Strict VBox status code.
2499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2500 * @param pu16 Where to return the opcode word.
2501 */
2502DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2503{
2504 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2505 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2506 {
2507 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2508# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2509 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2510# else
2511 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2512# endif
2513 return VINF_SUCCESS;
2514 }
2515 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2516}
2517
2518#else /* IEM_WITH_SETJMP */
2519
2520/**
2521 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2522 *
2523 * @returns The opcode word.
2524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2525 */
2526DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2527{
2528# ifdef IEM_WITH_CODE_TLB
2529 uint16_t u16;
2530 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2531 return u16;
2532# else
2533 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2534 if (rcStrict == VINF_SUCCESS)
2535 {
2536 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2537 pVCpu->iem.s.offOpcode += 2;
2538# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2539 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2540# else
2541 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2542# endif
2543 }
2544 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2545# endif
2546}
2547
2548
2549/**
2550 * Fetches the next opcode word, longjmp on error.
2551 *
2552 * @returns The opcode word.
2553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2554 */
2555DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2556{
2557# ifdef IEM_WITH_CODE_TLB
2558 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2559 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2560 if (RT_LIKELY( pbBuf != NULL
2561 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2562 {
2563 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2564# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2565 return *(uint16_t const *)&pbBuf[offBuf];
2566# else
2567 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2568# endif
2569 }
2570# else
2571 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2572 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2573 {
2574 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2575# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2576 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2577# else
2578 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2579# endif
2580 }
2581# endif
2582 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2583}
2584
2585#endif /* IEM_WITH_SETJMP */
2586
2587
2588/**
2589 * Fetches the next opcode word, returns automatically on failure.
2590 *
2591 * @param a_pu16 Where to return the opcode word.
2592 * @remark Implicitly references pVCpu.
2593 */
2594#ifndef IEM_WITH_SETJMP
2595# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2596 do \
2597 { \
2598 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2599 if (rcStrict2 != VINF_SUCCESS) \
2600 return rcStrict2; \
2601 } while (0)
2602#else
2603# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2604#endif
2605
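/*
 * Illustrative worked example (not part of the build): on the little-endian
 * hosts IEM runs on, the unaligned read and the RT_MAKE_U16 path above
 * assemble the same value, since RT_MAKE_U16 takes the low byte first:
 *
 *      uint8_t  const ab[2] = { 0x34, 0x12 };            // as found in the opcode stream
 *      uint16_t const u16   = RT_MAKE_U16(ab[0], ab[1]); // 0x1234
 */
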
2606#ifndef IEM_WITH_SETJMP
2607
2608/**
2609 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2610 *
2611 * @returns Strict VBox status code.
2612 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2613 * @param pu32 Where to return the opcode double word.
2614 */
2615DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2616{
2617 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2618 if (rcStrict == VINF_SUCCESS)
2619 {
2620 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2621 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2622 pVCpu->iem.s.offOpcode = offOpcode + 2;
2623 }
2624 else
2625 *pu32 = 0;
2626 return rcStrict;
2627}
2628
2629
2630/**
2631 * Fetches the next opcode word, zero extending it to a double word.
2632 *
2633 * @returns Strict VBox status code.
2634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2635 * @param pu32 Where to return the opcode double word.
2636 */
2637DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2638{
2639 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2640 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2641 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2642
2643 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2644 pVCpu->iem.s.offOpcode = offOpcode + 2;
2645 return VINF_SUCCESS;
2646}
2647
2648#endif /* !IEM_WITH_SETJMP */
2649
2650
2651/**
2652 * Fetches the next opcode word and zero extends it to a double word, returns
2653 * automatically on failure.
2654 *
2655 * @param a_pu32 Where to return the opcode double word.
2656 * @remark Implicitly references pVCpu.
2657 */
2658#ifndef IEM_WITH_SETJMP
2659# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2660 do \
2661 { \
2662 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2663 if (rcStrict2 != VINF_SUCCESS) \
2664 return rcStrict2; \
2665 } while (0)
2666#else
2667# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2668#endif
2669
2670#ifndef IEM_WITH_SETJMP
2671
2672/**
2673 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2674 *
2675 * @returns Strict VBox status code.
2676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2677 * @param pu64 Where to return the opcode quad word.
2678 */
2679DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2680{
2681 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2682 if (rcStrict == VINF_SUCCESS)
2683 {
2684 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2685 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2686 pVCpu->iem.s.offOpcode = offOpcode + 2;
2687 }
2688 else
2689 *pu64 = 0;
2690 return rcStrict;
2691}
2692
2693
2694/**
2695 * Fetches the next opcode word, zero extending it to a quad word.
2696 *
2697 * @returns Strict VBox status code.
2698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2699 * @param pu64 Where to return the opcode quad word.
2700 */
2701DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2702{
2703 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2704 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2705 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2706
2707 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2708 pVCpu->iem.s.offOpcode = offOpcode + 2;
2709 return VINF_SUCCESS;
2710}
2711
2712#endif /* !IEM_WITH_SETJMP */
2713
2714/**
2715 * Fetches the next opcode word and zero extends it to a quad word, returns
2716 * automatically on failure.
2717 *
2718 * @param a_pu64 Where to return the opcode quad word.
2719 * @remark Implicitly references pVCpu.
2720 */
2721#ifndef IEM_WITH_SETJMP
2722# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2723 do \
2724 { \
2725 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2726 if (rcStrict2 != VINF_SUCCESS) \
2727 return rcStrict2; \
2728 } while (0)
2729#else
2730# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2731#endif
2732
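/*
 * Illustrative note (not part of the build): unlike the S8_SX_* fetchers
 * earlier, the ZX variants zero extend, so the opcode word 0xff9c yields:
 *
 *      uint32_t const u32 = (uint16_t)0xff9c;   // 0x0000ff9c
 *      uint64_t const u64 = (uint16_t)0xff9c;   // 0x000000000000ff9c
 */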
2733
2734#ifndef IEM_WITH_SETJMP
2735/**
2736 * Fetches the next signed word from the opcode stream.
2737 *
2738 * @returns Strict VBox status code.
2739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2740 * @param pi16 Where to return the signed word.
2741 */
2742DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2743{
2744 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2745}
2746#endif /* !IEM_WITH_SETJMP */
2747
2748
2749/**
2750 * Fetches the next signed word from the opcode stream, returning automatically
2751 * on failure.
2752 *
2753 * @param a_pi16 Where to return the signed word.
2754 * @remark Implicitly references pVCpu.
2755 */
2756#ifndef IEM_WITH_SETJMP
2757# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2758 do \
2759 { \
2760 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2761 if (rcStrict2 != VINF_SUCCESS) \
2762 return rcStrict2; \
2763 } while (0)
2764#else
2765# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2766#endif
2767
2768#ifndef IEM_WITH_SETJMP
2769
2770/**
2771 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2772 *
2773 * @returns Strict VBox status code.
2774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2775 * @param pu32 Where to return the opcode dword.
2776 */
2777DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2778{
2779 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2780 if (rcStrict == VINF_SUCCESS)
2781 {
2782 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2783# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2784 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2785# else
2786 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2787 pVCpu->iem.s.abOpcode[offOpcode + 1],
2788 pVCpu->iem.s.abOpcode[offOpcode + 2],
2789 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2790# endif
2791 pVCpu->iem.s.offOpcode = offOpcode + 4;
2792 }
2793 else
2794 *pu32 = 0;
2795 return rcStrict;
2796}
2797
2798
2799/**
2800 * Fetches the next opcode dword.
2801 *
2802 * @returns Strict VBox status code.
2803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2804 * @param pu32 Where to return the opcode double word.
2805 */
2806DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2807{
2808 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2809 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2810 {
2811 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2812# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2813 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2814# else
2815 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2816 pVCpu->iem.s.abOpcode[offOpcode + 1],
2817 pVCpu->iem.s.abOpcode[offOpcode + 2],
2818 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2819# endif
2820 return VINF_SUCCESS;
2821 }
2822 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2823}
2824
2825#else  /* IEM_WITH_SETJMP */
2826
2827/**
2828 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2829 *
2830 * @returns The opcode dword.
2831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2832 */
2833DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2834{
2835# ifdef IEM_WITH_CODE_TLB
2836 uint32_t u32;
2837 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2838 return u32;
2839# else
2840 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2841 if (rcStrict == VINF_SUCCESS)
2842 {
2843 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2844 pVCpu->iem.s.offOpcode = offOpcode + 4;
2845# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2846 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2847# else
2848 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2849 pVCpu->iem.s.abOpcode[offOpcode + 1],
2850 pVCpu->iem.s.abOpcode[offOpcode + 2],
2851 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2852# endif
2853 }
2854 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2855# endif
2856}
2857
2858
2859/**
2860 * Fetches the next opcode dword, longjmp on error.
2861 *
2862 * @returns The opcode dword.
2863 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2864 */
2865DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2866{
2867# ifdef IEM_WITH_CODE_TLB
2868 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2869 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2870 if (RT_LIKELY( pbBuf != NULL
2871 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2872 {
2873 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2874# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2875 return *(uint32_t const *)&pbBuf[offBuf];
2876# else
2877 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2878 pbBuf[offBuf + 1],
2879 pbBuf[offBuf + 2],
2880 pbBuf[offBuf + 3]);
2881# endif
2882 }
2883# else
2884 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2885 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2886 {
2887 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2888# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2889 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2890# else
2891 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2892 pVCpu->iem.s.abOpcode[offOpcode + 1],
2893 pVCpu->iem.s.abOpcode[offOpcode + 2],
2894 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2895# endif
2896 }
2897# endif
2898 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2899}
2900
2901#endif /* IEM_WITH_SETJMP */
2902
2903
2904/**
2905 * Fetches the next opcode dword, returns automatically on failure.
2906 *
2907 * @param a_pu32 Where to return the opcode dword.
2908 * @remark Implicitly references pVCpu.
2909 */
2910#ifndef IEM_WITH_SETJMP
2911# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2912 do \
2913 { \
2914 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2915 if (rcStrict2 != VINF_SUCCESS) \
2916 return rcStrict2; \
2917 } while (0)
2918#else
2919# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2920#endif
2921
2922#ifndef IEM_WITH_SETJMP
2923
2924/**
2925 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2926 *
2927 * @returns Strict VBox status code.
2928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2929 * @param   pu64                Where to return the opcode qword.
2930 */
2931DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2932{
2933 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2934 if (rcStrict == VINF_SUCCESS)
2935 {
2936 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2937 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2938 pVCpu->iem.s.abOpcode[offOpcode + 1],
2939 pVCpu->iem.s.abOpcode[offOpcode + 2],
2940 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2941 pVCpu->iem.s.offOpcode = offOpcode + 4;
2942 }
2943 else
2944 *pu64 = 0;
2945 return rcStrict;
2946}
2947
2948
2949/**
2950 * Fetches the next opcode dword, zero extending it to a quad word.
2951 *
2952 * @returns Strict VBox status code.
2953 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2954 * @param pu64 Where to return the opcode quad word.
2955 */
2956DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2957{
2958 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2959 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2960 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2961
2962 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2963 pVCpu->iem.s.abOpcode[offOpcode + 1],
2964 pVCpu->iem.s.abOpcode[offOpcode + 2],
2965 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2966 pVCpu->iem.s.offOpcode = offOpcode + 4;
2967 return VINF_SUCCESS;
2968}
2969
2970#endif /* !IEM_WITH_SETJMP */
2971
2972
2973/**
2974 * Fetches the next opcode dword and zero extends it to a quad word, returns
2975 * automatically on failure.
2976 *
2977 * @param a_pu64 Where to return the opcode quad word.
2978 * @remark Implicitly references pVCpu.
2979 */
2980#ifndef IEM_WITH_SETJMP
2981# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2982 do \
2983 { \
2984 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2985 if (rcStrict2 != VINF_SUCCESS) \
2986 return rcStrict2; \
2987 } while (0)
2988#else
2989# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2990#endif
2991
2992
2993#ifndef IEM_WITH_SETJMP
2994/**
2995 * Fetches the next signed double word from the opcode stream.
2996 *
2997 * @returns Strict VBox status code.
2998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2999 * @param pi32 Where to return the signed double word.
3000 */
3001DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3002{
3003 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3004}
3005#endif
3006
3007/**
3008 * Fetches the next signed double word from the opcode stream, returning
3009 * automatically on failure.
3010 *
3011 * @param a_pi32 Where to return the signed double word.
3012 * @remark Implicitly references pVCpu.
3013 */
3014#ifndef IEM_WITH_SETJMP
3015# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3016 do \
3017 { \
3018 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3019 if (rcStrict2 != VINF_SUCCESS) \
3020 return rcStrict2; \
3021 } while (0)
3022#else
3023# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3024#endif
3025
3026#ifndef IEM_WITH_SETJMP
3027
3028/**
3029 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3030 *
3031 * @returns Strict VBox status code.
3032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3033 * @param pu64 Where to return the opcode qword.
3034 */
3035DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3036{
3037 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3038 if (rcStrict == VINF_SUCCESS)
3039 {
3040 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3041 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3042 pVCpu->iem.s.abOpcode[offOpcode + 1],
3043 pVCpu->iem.s.abOpcode[offOpcode + 2],
3044 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3045 pVCpu->iem.s.offOpcode = offOpcode + 4;
3046 }
3047 else
3048 *pu64 = 0;
3049 return rcStrict;
3050}
3051
3052
3053/**
3054 * Fetches the next opcode dword, sign extending it into a quad word.
3055 *
3056 * @returns Strict VBox status code.
3057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3058 * @param pu64 Where to return the opcode quad word.
3059 */
3060DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3061{
3062 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3063 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3064 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3065
3066 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3067 pVCpu->iem.s.abOpcode[offOpcode + 1],
3068 pVCpu->iem.s.abOpcode[offOpcode + 2],
3069 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3070 *pu64 = i32;
3071 pVCpu->iem.s.offOpcode = offOpcode + 4;
3072 return VINF_SUCCESS;
3073}
3074
3075#endif /* !IEM_WITH_SETJMP */
3076
3077
3078/**
3079 * Fetches the next opcode double word and sign extends it to a quad word,
3080 * returns automatically on failure.
3081 *
3082 * @param a_pu64 Where to return the opcode quad word.
3083 * @remark Implicitly references pVCpu.
3084 */
3085#ifndef IEM_WITH_SETJMP
3086# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3087 do \
3088 { \
3089 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3090 if (rcStrict2 != VINF_SUCCESS) \
3091 return rcStrict2; \
3092 } while (0)
3093#else
3094# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3095#endif
3096
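/*
 * Illustrative worked example (not part of the build): this fetcher is
 * typically used for 32-bit immediates and displacements that get sign
 * extended to 64 bits in long mode.  E.g. the opcode bytes fc ff ff ff decode
 * to the int32_t value -4, which widens to:
 *
 *      uint64_t const u64 = (int32_t)UINT32_C(0xfffffffc);   // 0xfffffffffffffffc
 */
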
3097#ifndef IEM_WITH_SETJMP
3098
3099/**
3100 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3101 *
3102 * @returns Strict VBox status code.
3103 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3104 * @param pu64 Where to return the opcode qword.
3105 */
3106DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3107{
3108 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3109 if (rcStrict == VINF_SUCCESS)
3110 {
3111 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3112# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3113 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3114# else
3115 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3116 pVCpu->iem.s.abOpcode[offOpcode + 1],
3117 pVCpu->iem.s.abOpcode[offOpcode + 2],
3118 pVCpu->iem.s.abOpcode[offOpcode + 3],
3119 pVCpu->iem.s.abOpcode[offOpcode + 4],
3120 pVCpu->iem.s.abOpcode[offOpcode + 5],
3121 pVCpu->iem.s.abOpcode[offOpcode + 6],
3122 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3123# endif
3124 pVCpu->iem.s.offOpcode = offOpcode + 8;
3125 }
3126 else
3127 *pu64 = 0;
3128 return rcStrict;
3129}
3130
3131
3132/**
3133 * Fetches the next opcode qword.
3134 *
3135 * @returns Strict VBox status code.
3136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3137 * @param pu64 Where to return the opcode qword.
3138 */
3139DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3140{
3141 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3142 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3143 {
3144# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3145 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3146# else
3147 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3148 pVCpu->iem.s.abOpcode[offOpcode + 1],
3149 pVCpu->iem.s.abOpcode[offOpcode + 2],
3150 pVCpu->iem.s.abOpcode[offOpcode + 3],
3151 pVCpu->iem.s.abOpcode[offOpcode + 4],
3152 pVCpu->iem.s.abOpcode[offOpcode + 5],
3153 pVCpu->iem.s.abOpcode[offOpcode + 6],
3154 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3155# endif
3156 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3157 return VINF_SUCCESS;
3158 }
3159 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3160}
3161
3162#else /* IEM_WITH_SETJMP */
3163
3164/**
3165 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3166 *
3167 * @returns The opcode qword.
3168 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3169 */
3170DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3171{
3172# ifdef IEM_WITH_CODE_TLB
3173 uint64_t u64;
3174 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3175 return u64;
3176# else
3177 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3178 if (rcStrict == VINF_SUCCESS)
3179 {
3180 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3181 pVCpu->iem.s.offOpcode = offOpcode + 8;
3182# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3183 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3184# else
3185 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3186 pVCpu->iem.s.abOpcode[offOpcode + 1],
3187 pVCpu->iem.s.abOpcode[offOpcode + 2],
3188 pVCpu->iem.s.abOpcode[offOpcode + 3],
3189 pVCpu->iem.s.abOpcode[offOpcode + 4],
3190 pVCpu->iem.s.abOpcode[offOpcode + 5],
3191 pVCpu->iem.s.abOpcode[offOpcode + 6],
3192 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3193# endif
3194 }
3195 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3196# endif
3197}
3198
3199
3200/**
3201 * Fetches the next opcode qword, longjmp on error.
3202 *
3203 * @returns The opcode qword.
3204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3205 */
3206DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3207{
3208# ifdef IEM_WITH_CODE_TLB
3209 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3210 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3211 if (RT_LIKELY( pbBuf != NULL
3212 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3213 {
3214 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3215# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3216 return *(uint64_t const *)&pbBuf[offBuf];
3217# else
3218 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3219 pbBuf[offBuf + 1],
3220 pbBuf[offBuf + 2],
3221 pbBuf[offBuf + 3],
3222 pbBuf[offBuf + 4],
3223 pbBuf[offBuf + 5],
3224 pbBuf[offBuf + 6],
3225 pbBuf[offBuf + 7]);
3226# endif
3227 }
3228# else
3229 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3230 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3231 {
3232 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3233# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3234 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3235# else
3236 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3237 pVCpu->iem.s.abOpcode[offOpcode + 1],
3238 pVCpu->iem.s.abOpcode[offOpcode + 2],
3239 pVCpu->iem.s.abOpcode[offOpcode + 3],
3240 pVCpu->iem.s.abOpcode[offOpcode + 4],
3241 pVCpu->iem.s.abOpcode[offOpcode + 5],
3242 pVCpu->iem.s.abOpcode[offOpcode + 6],
3243 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3244# endif
3245 }
3246# endif
3247 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3248}
3249
3250#endif /* IEM_WITH_SETJMP */
3251
3252/**
3253 * Fetches the next opcode quad word, returns automatically on failure.
3254 *
3255 * @param a_pu64 Where to return the opcode quad word.
3256 * @remark Implicitly references pVCpu.
3257 */
3258#ifndef IEM_WITH_SETJMP
3259# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3260 do \
3261 { \
3262 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3263 if (rcStrict2 != VINF_SUCCESS) \
3264 return rcStrict2; \
3265 } while (0)
3266#else
3267# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3268#endif
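
/*
 * Illustrative sketch only (hypothetical helper, excluded from the build): how the
 * IEM_OPCODE_GET_NEXT_* macros above are meant to be used by a decoder function.
 * In non-setjmp builds the macro returns from the enclosing function on a fetch
 * failure, so that function must return VBOXSTRICTRC; in setjmp builds the macro
 * longjmps instead and only the success path remains visible here.
 */
#if 0 /* example only */
IEM_STATIC VBOXSTRICTRC iemExampleFetchImm64(PVMCPU pVCpu, uint64_t *puImm)
{
    uint64_t uImm64 = 0;
    IEM_OPCODE_GET_NEXT_U64(&uImm64);   /* fetches 8 opcode bytes or bails out */
    *puImm = uImm64;
    return VINF_SUCCESS;
}
#endif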
3269
3270
3271/** @name Misc Worker Functions.
3272 * @{
3273 */
3274
3275/**
3276 * Gets the exception class for the specified exception vector.
3277 *
3278 * @returns The class of the specified exception.
3279 * @param uVector The exception vector.
3280 */
3281IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3282{
3283 Assert(uVector <= X86_XCPT_LAST);
3284 switch (uVector)
3285 {
3286 case X86_XCPT_DE:
3287 case X86_XCPT_TS:
3288 case X86_XCPT_NP:
3289 case X86_XCPT_SS:
3290 case X86_XCPT_GP:
3291 case X86_XCPT_SX: /* AMD only */
3292 return IEMXCPTCLASS_CONTRIBUTORY;
3293
3294 case X86_XCPT_PF:
3295 case X86_XCPT_VE: /* Intel only */
3296 return IEMXCPTCLASS_PAGE_FAULT;
3297
3298 case X86_XCPT_DF:
3299 return IEMXCPTCLASS_DOUBLE_FAULT;
3300 }
3301 return IEMXCPTCLASS_BENIGN;
3302}
3303
3304
3305/**
3306 * Evaluates how to handle an exception caused during delivery of another event
3307 * (exception / interrupt).
3308 *
3309 * @returns How to handle the recursive exception.
3310 * @param pVCpu The cross context virtual CPU structure of the
3311 * calling thread.
3312 * @param fPrevFlags The flags of the previous event.
3313 * @param uPrevVector The vector of the previous event.
3314 * @param fCurFlags The flags of the current exception.
3315 * @param uCurVector The vector of the current exception.
3316 * @param pfXcptRaiseInfo Where to store additional information about the
3317 * exception condition. Optional.
3318 */
3319VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3320 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3321{
3322 /*
3323     * Only CPU exceptions can be raised while delivering other events; exceptions generated by software
3324     * interrupts (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
3325 */
3326 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3327 Assert(pVCpu); RT_NOREF(pVCpu);
3328 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3329
3330 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3331 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3332 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3333 {
3334 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3335 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3336 {
3337 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3338 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3339 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3340 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3341 {
3342 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3343 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3344 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3345 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3346 uCurVector, pVCpu->cpum.GstCtx.cr2));
3347 }
3348 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3349 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3350 {
3351 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3352 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3353 }
3354 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3355 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3356 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3357 {
3358 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3359 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3360 }
3361 }
3362 else
3363 {
3364 if (uPrevVector == X86_XCPT_NMI)
3365 {
3366 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3367 if (uCurVector == X86_XCPT_PF)
3368 {
3369 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3370 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3371 }
3372 }
3373 else if ( uPrevVector == X86_XCPT_AC
3374 && uCurVector == X86_XCPT_AC)
3375 {
3376 enmRaise = IEMXCPTRAISE_CPU_HANG;
3377 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3378 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3379 }
3380 }
3381 }
3382 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3383 {
3384 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3385 if (uCurVector == X86_XCPT_PF)
3386 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3387 }
3388 else
3389 {
3390 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3391 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3392 }
3393
3394 if (pfXcptRaiseInfo)
3395 *pfXcptRaiseInfo = fRaiseInfo;
3396 return enmRaise;
3397}
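
/*
 * Illustrative sketch only (hypothetical caller, excluded from the build): per the
 * classification above, a contributory exception (#GP) raised while delivering a
 * page fault escalates to a double fault, whereas e.g. a #PF raised while
 * delivering a #GP is simply delivered as the current exception.
 */
#if 0 /* example only */
static void iemExampleEvalGpDuringPf(PVMCPU pVCpu)
{
    IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
    IEMXCPTRAISE     enmRaise   = IEMEvaluateRecursiveXcpt(pVCpu,
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF, /* previous: #PF */
                                                           IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP, /* current:  #GP */
                                                           &fRaiseInfo);
    Assert(enmRaise   == IEMXCPTRAISE_DOUBLE_FAULT);
    Assert(fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
    RT_NOREF2(enmRaise, fRaiseInfo);
}
#endif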
3398
3399
3400/**
3401 * Enters the CPU shutdown state initiated by a triple fault or other
3402 * unrecoverable conditions.
3403 *
3404 * @returns Strict VBox status code.
3405 * @param pVCpu The cross context virtual CPU structure of the
3406 * calling thread.
3407 */
3408IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3409{
3410 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3411 {
3412 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3413 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3414 }
3415
3416 RT_NOREF(pVCpu);
3417 return VINF_EM_TRIPLE_FAULT;
3418}
3419
3420
3421/**
3422 * Validates a new SS segment.
3423 *
3424 * @returns VBox strict status code.
3425 * @param pVCpu The cross context virtual CPU structure of the
3426 * calling thread.
3427 * @param   NewSS           The new SS selector.
3428 * @param uCpl The CPL to load the stack for.
3429 * @param pDesc Where to return the descriptor.
3430 */
3431IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3432{
3433 /* Null selectors are not allowed (we're not called for dispatching
3434 interrupts with SS=0 in long mode). */
3435 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3436 {
3437 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3438 return iemRaiseTaskSwitchFault0(pVCpu);
3439 }
3440
3441 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3442 if ((NewSS & X86_SEL_RPL) != uCpl)
3443 {
3444        Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3445 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3446 }
3447
3448 /*
3449 * Read the descriptor.
3450 */
3451 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3452 if (rcStrict != VINF_SUCCESS)
3453 return rcStrict;
3454
3455 /*
3456 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3457 */
3458 if (!pDesc->Legacy.Gen.u1DescType)
3459 {
3460 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3461 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3462 }
3463
3464 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3465 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3466 {
3467 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3468 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3469 }
3470 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3471 {
3472        Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3473 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3474 }
3475
3476 /* Is it there? */
3477 /** @todo testcase: Is this checked before the canonical / limit check below? */
3478 if (!pDesc->Legacy.Gen.u1Present)
3479 {
3480 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3481 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3482 }
3483
3484 return VINF_SUCCESS;
3485}
3486
3487
3488/**
3489 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3490 * not.
3491 *
3492 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3493 */
3494#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3495# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3496#else
3497# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3498#endif
3499
3500/**
3501 * Updates the EFLAGS in the correct manner wrt. PATM.
3502 *
3503 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3504 * @param a_fEfl The new EFLAGS.
3505 */
3506#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3507# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3508#else
3509# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3510#endif
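
/*
 * Illustrative sketch only (hypothetical helper, excluded from the build): EFLAGS
 * must always be read and written through the two wrappers above so that raw-mode
 * builds go via PATM; a read-modify-write sequence looks like this.
 */
#if 0 /* example only */
DECLINLINE(void) iemExampleClearIf(PVMCPU pVCpu)
{
    uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);   /* CPUMRawGetEFlags when VBOX_WITH_RAW_MODE_NOT_R0 is defined */
    fEfl &= ~X86_EFL_IF;
    IEMMISC_SET_EFL(pVCpu, fEfl);             /* CPUMRawSetEFlags when VBOX_WITH_RAW_MODE_NOT_R0 is defined */
}
#endif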
3511
3512
3513/** @} */
3514
3515/** @name Raising Exceptions.
3516 *
3517 * @{
3518 */
3519
3520
3521/**
3522 * Loads the specified stack far pointer from the TSS.
3523 *
3524 * @returns VBox strict status code.
3525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3526 * @param uCpl The CPL to load the stack for.
3527 * @param pSelSS Where to return the new stack segment.
3528 * @param puEsp Where to return the new stack pointer.
3529 */
3530IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3531{
3532 VBOXSTRICTRC rcStrict;
3533 Assert(uCpl < 4);
3534
3535 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3536 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3537 {
3538 /*
3539 * 16-bit TSS (X86TSS16).
3540 */
3541 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3542 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3543 {
3544 uint32_t off = uCpl * 4 + 2;
3545 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3546 {
3547 /** @todo check actual access pattern here. */
3548 uint32_t u32Tmp = 0; /* gcc maybe... */
3549 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3550 if (rcStrict == VINF_SUCCESS)
3551 {
3552 *puEsp = RT_LOWORD(u32Tmp);
3553 *pSelSS = RT_HIWORD(u32Tmp);
3554 return VINF_SUCCESS;
3555 }
3556 }
3557 else
3558 {
3559 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3560 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3561 }
3562 break;
3563 }
3564
3565 /*
3566 * 32-bit TSS (X86TSS32).
3567 */
3568 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3569 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3570 {
3571 uint32_t off = uCpl * 8 + 4;
3572 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3573 {
3574/** @todo check actual access pattern here. */
3575 uint64_t u64Tmp;
3576 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3577 if (rcStrict == VINF_SUCCESS)
3578 {
3579 *puEsp = u64Tmp & UINT32_MAX;
3580 *pSelSS = (RTSEL)(u64Tmp >> 32);
3581 return VINF_SUCCESS;
3582 }
3583 }
3584 else
3585 {
3586                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3587 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3588 }
3589 break;
3590 }
3591
3592 default:
3593 AssertFailed();
3594 rcStrict = VERR_IEM_IPE_4;
3595 break;
3596 }
3597
3598 *puEsp = 0; /* make gcc happy */
3599 *pSelSS = 0; /* make gcc happy */
3600 return rcStrict;
3601}
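
/*
 * Layout cross-check sketch (excluded from the build; assumes the X86TSS32/X86TSS16
 * field names esp1, ss1 and sp1 from iprt/x86.h): the offsets computed above land
 * on the ring-N stack fields, e.g. for uCpl=1 the 32-bit read starts at esp1 and
 * the 16-bit read at sp1.
 */
#if 0 /* example only */
AssertCompile(RT_UOFFSETOF(X86TSS32, esp1) == 1 * 8 + 4);
AssertCompile(RT_UOFFSETOF(X86TSS32, ss1)  == 1 * 8 + 8);
AssertCompile(RT_UOFFSETOF(X86TSS16, sp1)  == 1 * 4 + 2);
AssertCompile(RT_UOFFSETOF(X86TSS16, ss1)  == 1 * 4 + 4);
#endif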
3602
3603
3604/**
3605 * Loads the specified stack pointer from the 64-bit TSS.
3606 *
3607 * @returns VBox strict status code.
3608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3609 * @param uCpl The CPL to load the stack for.
3610 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3611 * @param puRsp Where to return the new stack pointer.
3612 */
3613IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3614{
3615 Assert(uCpl < 4);
3616 Assert(uIst < 8);
3617 *puRsp = 0; /* make gcc happy */
3618
3619 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3620 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3621
3622 uint32_t off;
3623 if (uIst)
3624 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3625 else
3626 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3627 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3628 {
3629 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3630 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3631 }
3632
3633 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3634}
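
/*
 * Layout cross-check sketch (excluded from the build): with the offsets used above,
 * uIst=1 reads X86TSS64::ist1 at offset 0x24 and uCpl=0 with no IST reads
 * X86TSS64::rsp0 at offset 0x04, matching the AMD64 TSS layout.
 */
#if 0 /* example only */
AssertCompile(RT_UOFFSETOF(X86TSS64, rsp0) == 0x04);
AssertCompile(RT_UOFFSETOF(X86TSS64, ist1) == 0x24);
#endif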
3635
3636
3637/**
3638 * Adjust the CPU state according to the exception being raised.
3639 *
3640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3641 * @param u8Vector The exception that has been raised.
3642 */
3643DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3644{
3645 switch (u8Vector)
3646 {
3647 case X86_XCPT_DB:
3648 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3649 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3650 break;
3651 /** @todo Read the AMD and Intel exception reference... */
3652 }
3653}
3654
3655
3656/**
3657 * Implements exceptions and interrupts for real mode.
3658 *
3659 * @returns VBox strict status code.
3660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3661 * @param cbInstr The number of bytes to offset rIP by in the return
3662 * address.
3663 * @param u8Vector The interrupt / exception vector number.
3664 * @param fFlags The flags.
3665 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3666 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3667 */
3668IEM_STATIC VBOXSTRICTRC
3669iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3670 uint8_t cbInstr,
3671 uint8_t u8Vector,
3672 uint32_t fFlags,
3673 uint16_t uErr,
3674 uint64_t uCr2)
3675{
3676 NOREF(uErr); NOREF(uCr2);
3677 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3678
3679 /*
3680 * Read the IDT entry.
3681 */
3682 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3683 {
3684 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3685 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3686 }
3687 RTFAR16 Idte;
3688 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3689 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3690 {
3691 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3692 return rcStrict;
3693 }
3694
3695 /*
3696 * Push the stack frame.
3697 */
3698 uint16_t *pu16Frame;
3699 uint64_t uNewRsp;
3700 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3701 if (rcStrict != VINF_SUCCESS)
3702 return rcStrict;
3703
3704 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3705#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3706 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3707 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3708 fEfl |= UINT16_C(0xf000);
3709#endif
3710 pu16Frame[2] = (uint16_t)fEfl;
3711 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3712 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3713 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3714 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3715 return rcStrict;
3716
3717 /*
3718 * Load the vector address into cs:ip and make exception specific state
3719 * adjustments.
3720 */
3721 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3722 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3723 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3724 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3725 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3726 pVCpu->cpum.GstCtx.rip = Idte.off;
3727 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3728 IEMMISC_SET_EFL(pVCpu, fEfl);
3729
3730 /** @todo do we actually do this in real mode? */
3731 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3732 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3733
3734 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3735}
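
/*
 * Worked example (sketch, excluded from the build; the helper name is made up):
 * for vector 0x21 with the IVT at linear address 0, the code above fetches the
 * RTFAR16 entry at 0x21 * 4 = 0x84 and pushes the 6-byte FLAGS/CS/IP frame, with
 * IP ending up at the lowest address (the new top of stack).
 */
#if 0 /* example only */
DECLINLINE(RTGCPTR) iemExampleRealModeIdteAddr(PVMCPU pVCpu, uint8_t u8Vector)
{
    return pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector;
}
#endif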
3736
3737
3738/**
3739 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3740 *
3741 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3742 * @param pSReg Pointer to the segment register.
3743 */
3744IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3745{
3746 pSReg->Sel = 0;
3747 pSReg->ValidSel = 0;
3748 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3749 {
3750        /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3751 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3752 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3753 }
3754 else
3755 {
3756 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3757 /** @todo check this on AMD-V */
3758 pSReg->u64Base = 0;
3759 pSReg->u32Limit = 0;
3760 }
3761}
3762
3763
3764/**
3765 * Loads a segment selector during a task switch in V8086 mode.
3766 *
3767 * @param pSReg Pointer to the segment register.
3768 * @param uSel The selector value to load.
3769 */
3770IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3771{
3772 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3773 pSReg->Sel = uSel;
3774 pSReg->ValidSel = uSel;
3775 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3776 pSReg->u64Base = uSel << 4;
3777 pSReg->u32Limit = 0xffff;
3778 pSReg->Attr.u = 0xf3;
3779}
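
/*
 * Worked example (sketch, excluded from the build; the helper name is made up):
 * loading selector 0x1234 with the helper above yields a real-mode style segment,
 * i.e. base 0x12340 (selector << 4), limit 0xffff and attributes 0xf3 (present,
 * DPL=3, accessed read/write data).
 */
#if 0 /* example only */
static void iemExampleLoadV86Es(PVMCPU pVCpu)
{
    iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, 0x1234);
    Assert(pVCpu->cpum.GstCtx.es.u64Base  == UINT64_C(0x12340));
    Assert(pVCpu->cpum.GstCtx.es.u32Limit == 0xffff);
    Assert(pVCpu->cpum.GstCtx.es.Attr.u   == 0xf3);
}
#endif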
3780
3781
3782/**
3783 * Loads a NULL data selector into a selector register, both the hidden and
3784 * visible parts, in protected mode.
3785 *
3786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3787 * @param pSReg Pointer to the segment register.
3788 * @param uRpl The RPL.
3789 */
3790IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3791{
3792    /** @todo Testcase: write a testcase checking what happens when loading a NULL
3793 * data selector in protected mode. */
3794 pSReg->Sel = uRpl;
3795 pSReg->ValidSel = uRpl;
3796 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3797 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3798 {
3799 /* VT-x (Intel 3960x) observed doing something like this. */
3800 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3801 pSReg->u32Limit = UINT32_MAX;
3802 pSReg->u64Base = 0;
3803 }
3804 else
3805 {
3806 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3807 pSReg->u32Limit = 0;
3808 pSReg->u64Base = 0;
3809 }
3810}
3811
3812
3813/**
3814 * Loads a segment selector during a task switch in protected mode.
3815 *
3816 * In this task switch scenario, we would throw \#TS exceptions rather than
3817 * \#GPs.
3818 *
3819 * @returns VBox strict status code.
3820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3821 * @param pSReg Pointer to the segment register.
3822 * @param uSel The new selector value.
3823 *
3824 * @remarks This does _not_ handle CS or SS.
3825 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3826 */
3827IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3828{
3829 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3830
3831 /* Null data selector. */
3832 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3833 {
3834 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3835 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3836 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3837 return VINF_SUCCESS;
3838 }
3839
3840 /* Fetch the descriptor. */
3841 IEMSELDESC Desc;
3842 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3843 if (rcStrict != VINF_SUCCESS)
3844 {
3845 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3846 VBOXSTRICTRC_VAL(rcStrict)));
3847 return rcStrict;
3848 }
3849
3850 /* Must be a data segment or readable code segment. */
3851 if ( !Desc.Legacy.Gen.u1DescType
3852 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3853 {
3854 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3855 Desc.Legacy.Gen.u4Type));
3856 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3857 }
3858
3859 /* Check privileges for data segments and non-conforming code segments. */
3860 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3861 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3862 {
3863 /* The RPL and the new CPL must be less than or equal to the DPL. */
3864 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3865 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3866 {
3867 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3868 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3869 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3870 }
3871 }
3872
3873 /* Is it there? */
3874 if (!Desc.Legacy.Gen.u1Present)
3875 {
3876 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3877 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3878 }
3879
3880 /* The base and limit. */
3881 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3882 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3883
3884 /*
3885 * Ok, everything checked out fine. Now set the accessed bit before
3886 * committing the result into the registers.
3887 */
3888 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3889 {
3890 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3891 if (rcStrict != VINF_SUCCESS)
3892 return rcStrict;
3893 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3894 }
3895
3896 /* Commit */
3897 pSReg->Sel = uSel;
3898 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3899 pSReg->u32Limit = cbLimit;
3900 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3901 pSReg->ValidSel = uSel;
3902 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3903 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3904 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3905
3906 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3907 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3908 return VINF_SUCCESS;
3909}
3910
3911
3912/**
3913 * Performs a task switch.
3914 *
3915 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3916 * caller is responsible for performing the necessary checks (like DPL, TSS
3917 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3918 * reference for JMP, CALL, IRET.
3919 *
3920 * If the task switch is due to a software interrupt or hardware exception,
3921 * the caller is responsible for validating the TSS selector and descriptor. See
3922 * Intel Instruction reference for INT n.
3923 *
3924 * @returns VBox strict status code.
3925 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3926 * @param enmTaskSwitch What caused this task switch.
3927 * @param uNextEip The EIP effective after the task switch.
3928 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3929 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3930 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3931 * @param SelTSS The TSS selector of the new task.
3932 * @param pNewDescTSS Pointer to the new TSS descriptor.
3933 */
3934IEM_STATIC VBOXSTRICTRC
3935iemTaskSwitch(PVMCPU pVCpu,
3936 IEMTASKSWITCH enmTaskSwitch,
3937 uint32_t uNextEip,
3938 uint32_t fFlags,
3939 uint16_t uErr,
3940 uint64_t uCr2,
3941 RTSEL SelTSS,
3942 PIEMSELDESC pNewDescTSS)
3943{
3944 Assert(!IEM_IS_REAL_MODE(pVCpu));
3945 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3946 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3947
3948 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3949 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3950 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3951 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3952 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3953
3954 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3955 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3956
3957 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3958 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3959
3960 /* Update CR2 in case it's a page-fault. */
3961 /** @todo This should probably be done much earlier in IEM/PGM. See
3962 * @bugref{5653#c49}. */
3963 if (fFlags & IEM_XCPT_FLAGS_CR2)
3964 pVCpu->cpum.GstCtx.cr2 = uCr2;
3965
3966 /*
3967 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3968 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3969 */
3970 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3971 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3972 if (uNewTSSLimit < uNewTSSLimitMin)
3973 {
3974 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3975 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3976 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3977 }
3978
3979 /*
3980 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3981 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3982 */
3983 if (IEM_IS_SVM_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3984 {
3985 uint32_t const uExitInfo1 = SelTSS;
3986 uint32_t uExitInfo2 = uErr;
3987 switch (enmTaskSwitch)
3988 {
3989 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3990 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3991 default: break;
3992 }
3993 if (fFlags & IEM_XCPT_FLAGS_ERR)
3994 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3995 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
3996 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3997
3998 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3999 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4000 RT_NOREF2(uExitInfo1, uExitInfo2);
4001 }
4002 /** @todo Nested-VMX task-switch intercept. */
4003
4004 /*
4005 * Check the current TSS limit. The last written byte to the current TSS during the
4006 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4007 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4008 *
4009     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4010 * end up with smaller than "legal" TSS limits.
4011 */
4012 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4013 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4014 if (uCurTSSLimit < uCurTSSLimitMin)
4015 {
4016 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4017 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4018 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4019 }
4020
4021 /*
4022 * Verify that the new TSS can be accessed and map it. Map only the required contents
4023 * and not the entire TSS.
4024 */
4025 void *pvNewTSS;
4026 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4027 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4028 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4029 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4030 * not perform correct translation if this happens. See Intel spec. 7.2.1
4031 * "Task-State Segment" */
4032 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4033 if (rcStrict != VINF_SUCCESS)
4034 {
4035 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4036 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4037 return rcStrict;
4038 }
4039
4040 /*
4041 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4042 */
4043 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4044 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4045 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4046 {
4047 PX86DESC pDescCurTSS;
4048 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4049 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4050 if (rcStrict != VINF_SUCCESS)
4051 {
4052            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4053 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4054 return rcStrict;
4055 }
4056
4057 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4058 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4059 if (rcStrict != VINF_SUCCESS)
4060 {
4061            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4062 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4063 return rcStrict;
4064 }
4065
4066 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4067 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4068 {
4069 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4070 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4071 u32EFlags &= ~X86_EFL_NT;
4072 }
4073 }
4074
4075 /*
4076 * Save the CPU state into the current TSS.
4077 */
4078 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4079 if (GCPtrNewTSS == GCPtrCurTSS)
4080 {
4081 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4082 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4083 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ldtr.Sel));
4084 }
4085 if (fIsNewTSS386)
4086 {
4087 /*
4088 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4089 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4090 */
4091 void *pvCurTSS32;
4092 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4093 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4094 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4095 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4096 if (rcStrict != VINF_SUCCESS)
4097 {
4098 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4099 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4100 return rcStrict;
4101 }
4102
4103        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4104 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4105 pCurTSS32->eip = uNextEip;
4106 pCurTSS32->eflags = u32EFlags;
4107 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4108 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4109 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4110 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4111 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4112 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4113 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4114 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4115 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4116 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4117 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4118 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4119 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4120 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4121
4122 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4123 if (rcStrict != VINF_SUCCESS)
4124 {
4125 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4126 VBOXSTRICTRC_VAL(rcStrict)));
4127 return rcStrict;
4128 }
4129 }
4130 else
4131 {
4132 /*
4133 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4134 */
4135 void *pvCurTSS16;
4136 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4137 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4138 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4139 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4140 if (rcStrict != VINF_SUCCESS)
4141 {
4142 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4143 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4144 return rcStrict;
4145 }
4146
4147        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4148 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4149 pCurTSS16->ip = uNextEip;
4150 pCurTSS16->flags = u32EFlags;
4151 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4152 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4153 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4154 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4155 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4156 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4157 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4158 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4159 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4160 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4161 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4162 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4163
4164 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4165 if (rcStrict != VINF_SUCCESS)
4166 {
4167 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4168 VBOXSTRICTRC_VAL(rcStrict)));
4169 return rcStrict;
4170 }
4171 }
4172
4173 /*
4174 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4175 */
4176 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4177 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4178 {
4179 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4180 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4181 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4182 }
4183
4184 /*
4185 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4186 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4187 */
4188 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4189 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4190 bool fNewDebugTrap;
4191 if (fIsNewTSS386)
4192 {
4193 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4194 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4195 uNewEip = pNewTSS32->eip;
4196 uNewEflags = pNewTSS32->eflags;
4197 uNewEax = pNewTSS32->eax;
4198 uNewEcx = pNewTSS32->ecx;
4199 uNewEdx = pNewTSS32->edx;
4200 uNewEbx = pNewTSS32->ebx;
4201 uNewEsp = pNewTSS32->esp;
4202 uNewEbp = pNewTSS32->ebp;
4203 uNewEsi = pNewTSS32->esi;
4204 uNewEdi = pNewTSS32->edi;
4205 uNewES = pNewTSS32->es;
4206 uNewCS = pNewTSS32->cs;
4207 uNewSS = pNewTSS32->ss;
4208 uNewDS = pNewTSS32->ds;
4209 uNewFS = pNewTSS32->fs;
4210 uNewGS = pNewTSS32->gs;
4211 uNewLdt = pNewTSS32->selLdt;
4212 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4213 }
4214 else
4215 {
4216 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4217 uNewCr3 = 0;
4218 uNewEip = pNewTSS16->ip;
4219 uNewEflags = pNewTSS16->flags;
4220 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4221 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4222 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4223 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4224 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4225 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4226 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4227 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4228 uNewES = pNewTSS16->es;
4229 uNewCS = pNewTSS16->cs;
4230 uNewSS = pNewTSS16->ss;
4231 uNewDS = pNewTSS16->ds;
4232 uNewFS = 0;
4233 uNewGS = 0;
4234 uNewLdt = pNewTSS16->selLdt;
4235 fNewDebugTrap = false;
4236 }
4237
4238 if (GCPtrNewTSS == GCPtrCurTSS)
4239 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4240 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4241
4242 /*
4243 * We're done accessing the new TSS.
4244 */
4245 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4246 if (rcStrict != VINF_SUCCESS)
4247 {
4248 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4249 return rcStrict;
4250 }
4251
4252 /*
4253 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4254 */
4255 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4256 {
4257 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4258 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4259 if (rcStrict != VINF_SUCCESS)
4260 {
4261 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4262 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4263 return rcStrict;
4264 }
4265
4266 /* Check that the descriptor indicates the new TSS is available (not busy). */
4267 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4268 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4269 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4270
4271 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4272 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4273 if (rcStrict != VINF_SUCCESS)
4274 {
4275 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4276 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4277 return rcStrict;
4278 }
4279 }
4280
4281 /*
4282     * From this point on, we're technically in the new task. Exceptions raised here are deferred
4283     * until the task switch completes, but are delivered before any instruction of the new task executes.
4284 */
4285 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4286 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4287 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4288 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4289 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4290 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4291 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4292
4293 /* Set the busy bit in TR. */
4294 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4295 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4296 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4297 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4298 {
4299 uNewEflags |= X86_EFL_NT;
4300 }
4301
4302 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4303 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4304 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4305
4306 pVCpu->cpum.GstCtx.eip = uNewEip;
4307 pVCpu->cpum.GstCtx.eax = uNewEax;
4308 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4309 pVCpu->cpum.GstCtx.edx = uNewEdx;
4310 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4311 pVCpu->cpum.GstCtx.esp = uNewEsp;
4312 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4313 pVCpu->cpum.GstCtx.esi = uNewEsi;
4314 pVCpu->cpum.GstCtx.edi = uNewEdi;
4315
4316 uNewEflags &= X86_EFL_LIVE_MASK;
4317 uNewEflags |= X86_EFL_RA1_MASK;
4318 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4319
4320 /*
4321 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4322     * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3,
4323     * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
4324 */
4325 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4326 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4327
4328 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4329 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4330
4331 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4332 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4333
4334 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4335 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4336
4337 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4338 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4339
4340 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4341 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4342 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4343
4344 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4345 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4346 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4347 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4348
4349 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4350 {
4351 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4352 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4353 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4354 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4355 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4356 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4357 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4358 }
4359
4360 /*
4361 * Switch CR3 for the new task.
4362 */
4363 if ( fIsNewTSS386
4364 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4365 {
4366 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4367 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4368 AssertRCSuccessReturn(rc, rc);
4369
4370 /* Inform PGM. */
4371 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4372 AssertRCReturn(rc, rc);
4373 /* ignore informational status codes */
4374
4375 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4376 }
4377
4378 /*
4379 * Switch LDTR for the new task.
4380 */
4381 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4382 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4383 else
4384 {
4385 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4386
4387 IEMSELDESC DescNewLdt;
4388 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4389 if (rcStrict != VINF_SUCCESS)
4390 {
4391 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4392 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4393 return rcStrict;
4394 }
4395 if ( !DescNewLdt.Legacy.Gen.u1Present
4396 || DescNewLdt.Legacy.Gen.u1DescType
4397 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4398 {
4399 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4400 uNewLdt, DescNewLdt.Legacy.u));
4401 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4402 }
4403
4404 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4405 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4406 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4407 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4408 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4409 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4410 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4411 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4412 }
4413
4414 IEMSELDESC DescSS;
4415 if (IEM_IS_V86_MODE(pVCpu))
4416 {
4417 pVCpu->iem.s.uCpl = 3;
4418 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4419 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4420 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4421 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4422 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4423 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4424
4425 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4426 DescSS.Legacy.u = 0;
4427 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4428 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4429 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4430 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4431 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4432 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4433 DescSS.Legacy.Gen.u2Dpl = 3;
4434 }
4435 else
4436 {
4437 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4438
4439 /*
4440 * Load the stack segment for the new task.
4441 */
4442 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4443 {
4444 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4445 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4446 }
4447
4448 /* Fetch the descriptor. */
4449 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4450 if (rcStrict != VINF_SUCCESS)
4451 {
4452 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4453 VBOXSTRICTRC_VAL(rcStrict)));
4454 return rcStrict;
4455 }
4456
4457 /* SS must be a data segment and writable. */
4458 if ( !DescSS.Legacy.Gen.u1DescType
4459 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4460 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4461 {
4462 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4463 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4464 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4465 }
4466
4467 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4468 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4469 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4470 {
4471 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4472 uNewCpl));
4473 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4474 }
4475
4476 /* Is it there? */
4477 if (!DescSS.Legacy.Gen.u1Present)
4478 {
4479 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4480 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4481 }
4482
4483 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4484 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4485
4486 /* Set the accessed bit before committing the result into SS. */
4487 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4488 {
4489 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4490 if (rcStrict != VINF_SUCCESS)
4491 return rcStrict;
4492 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4493 }
4494
4495 /* Commit SS. */
4496 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4497 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4498 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4499 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4500 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4501 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4502 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4503
4504 /* CPL has changed, update IEM before loading rest of segments. */
4505 pVCpu->iem.s.uCpl = uNewCpl;
4506
4507 /*
4508 * Load the data segments for the new task.
4509 */
4510 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4511 if (rcStrict != VINF_SUCCESS)
4512 return rcStrict;
4513 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4514 if (rcStrict != VINF_SUCCESS)
4515 return rcStrict;
4516 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4517 if (rcStrict != VINF_SUCCESS)
4518 return rcStrict;
4519 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4520 if (rcStrict != VINF_SUCCESS)
4521 return rcStrict;
4522
4523 /*
4524 * Load the code segment for the new task.
4525 */
4526 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4527 {
4528 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4529 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4530 }
4531
4532 /* Fetch the descriptor. */
4533 IEMSELDESC DescCS;
4534 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4535 if (rcStrict != VINF_SUCCESS)
4536 {
4537 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4538 return rcStrict;
4539 }
4540
4541 /* CS must be a code segment. */
4542 if ( !DescCS.Legacy.Gen.u1DescType
4543 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4544 {
4545 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4546 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4547 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4548 }
4549
4550 /* For conforming CS, DPL must be less than or equal to the RPL. */
4551 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4552 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4553 {
4554            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4555 DescCS.Legacy.Gen.u2Dpl));
4556 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4557 }
4558
4559 /* For non-conforming CS, DPL must match RPL. */
4560 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4561 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4562 {
4563            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4564 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4565 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4566 }
4567
4568 /* Is it there? */
4569 if (!DescCS.Legacy.Gen.u1Present)
4570 {
4571 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4572 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4573 }
4574
4575 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4576 u64Base = X86DESC_BASE(&DescCS.Legacy);
4577
4578 /* Set the accessed bit before committing the result into CS. */
4579 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4580 {
4581 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4582 if (rcStrict != VINF_SUCCESS)
4583 return rcStrict;
4584 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4585 }
4586
4587 /* Commit CS. */
4588 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4589 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4590 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4591 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4592 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4593 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4594 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4595 }
4596
4597 /** @todo Debug trap. */
4598 if (fIsNewTSS386 && fNewDebugTrap)
4599 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4600
4601 /*
4602 * Construct the error code masks based on what caused this task switch.
4603 * See Intel Instruction reference for INT.
4604 */
4605 uint16_t uExt;
4606 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4607 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4608 {
4609 uExt = 1;
4610 }
4611 else
4612 uExt = 0;
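/* uExt becomes bit 0 (EXT) of any error code pushed below, telling the guest that the fault stems from an external event. */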
4613
4614 /*
4615 * Push any error code on to the new stack.
4616 */
4617 if (fFlags & IEM_XCPT_FLAGS_ERR)
4618 {
4619 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4620 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4621 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
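/* The error code is pushed as a dword on a 32-bit TSS and as a word on a 16-bit TSS. */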
4622
4623 /* Check that there is sufficient space on the stack. */
4624 /** @todo Factor out segment limit checking for normal/expand down segments
4625 * into a separate function. */
4626 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4627 {
4628 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4629 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4630 {
4631 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4632 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4633 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4634 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4635 }
4636 }
4637 else
4638 {
4639 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4640 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4641 {
4642 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4643 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4644 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4645 }
4646 }
4647
4648
4649 if (fIsNewTSS386)
4650 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4651 else
4652 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4653 if (rcStrict != VINF_SUCCESS)
4654 {
4655 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4656 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4657 return rcStrict;
4658 }
4659 }
4660
4661 /* Check the new EIP against the new CS limit. */
4662 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4663 {
4664 Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4665 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4666 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4667 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4668 }
4669
4670 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.ss.Sel));
4671 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4672}
4673
4674
4675/**
4676 * Implements exceptions and interrupts for protected mode.
4677 *
4678 * @returns VBox strict status code.
4679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4680 * @param cbInstr The number of bytes to offset rIP by in the return
4681 * address.
4682 * @param u8Vector The interrupt / exception vector number.
4683 * @param fFlags The flags.
4684 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4685 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4686 */
4687IEM_STATIC VBOXSTRICTRC
4688iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4689 uint8_t cbInstr,
4690 uint8_t u8Vector,
4691 uint32_t fFlags,
4692 uint16_t uErr,
4693 uint64_t uCr2)
4694{
4695 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4696
4697 /*
4698 * Read the IDT entry.
4699 */
4700 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4701 {
4702 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4703 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4704 }
4705 X86DESC Idte;
4706 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4707 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4708 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4709 {
4710 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4711 return rcStrict;
4712 }
4713 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4714 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4715 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4716
4717 /*
4718 * Check the descriptor type, DPL and such.
4719 * ASSUMES this is done in the same order as described for call-gate calls.
4720 */
4721 if (Idte.Gate.u1DescType)
4722 {
4723 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4724 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4725 }
4726 bool fTaskGate = false;
4727 uint8_t f32BitGate = true;
4728 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
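/* Interrupt and trap gates clear TF, NT, RF and VM in the delivered context; interrupt gates additionally clear IF (added in the switch below). */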
4729 switch (Idte.Gate.u4Type)
4730 {
4731 case X86_SEL_TYPE_SYS_UNDEFINED:
4732 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4733 case X86_SEL_TYPE_SYS_LDT:
4734 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4735 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4736 case X86_SEL_TYPE_SYS_UNDEFINED2:
4737 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4738 case X86_SEL_TYPE_SYS_UNDEFINED3:
4739 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4740 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4741 case X86_SEL_TYPE_SYS_UNDEFINED4:
4742 {
4743 /** @todo check what actually happens when the type is wrong...
4744 * esp. call gates. */
4745 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4746 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4747 }
4748
4749 case X86_SEL_TYPE_SYS_286_INT_GATE:
4750 f32BitGate = false;
4751 RT_FALL_THRU();
4752 case X86_SEL_TYPE_SYS_386_INT_GATE:
4753 fEflToClear |= X86_EFL_IF;
4754 break;
4755
4756 case X86_SEL_TYPE_SYS_TASK_GATE:
4757 fTaskGate = true;
4758#ifndef IEM_IMPLEMENTS_TASKSWITCH
4759 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4760#endif
4761 break;
4762
4763 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4764 f32BitGate = false;
RT_FALL_THRU();
4765 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4766 break;
4767
4768 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4769 }
4770
4771 /* Check DPL against CPL if applicable. */
4772 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4773 {
4774 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4775 {
4776 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4777 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4778 }
4779 }
4780
4781 /* Is it there? */
4782 if (!Idte.Gate.u1Present)
4783 {
4784 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4785 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4786 }
4787
4788 /* Is it a task-gate? */
4789 if (fTaskGate)
4790 {
4791 /*
4792 * Construct the error code masks based on what caused this task switch.
4793 * See Intel Instruction reference for INT.
4794 */
4795 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4796 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4797 RTSEL SelTSS = Idte.Gate.u16Sel;
4798
4799 /*
4800 * Fetch the TSS descriptor in the GDT.
4801 */
4802 IEMSELDESC DescTSS;
4803 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4804 if (rcStrict != VINF_SUCCESS)
4805 {
4806 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4807 VBOXSTRICTRC_VAL(rcStrict)));
4808 return rcStrict;
4809 }
4810
4811 /* The TSS descriptor must be a system segment and be available (not busy). */
4812 if ( DescTSS.Legacy.Gen.u1DescType
4813 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4814 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4815 {
4816 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4817 u8Vector, SelTSS, DescTSS.Legacy.au64));
4818 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4819 }
4820
4821 /* The TSS must be present. */
4822 if (!DescTSS.Legacy.Gen.u1Present)
4823 {
4824 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4825 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4826 }
4827
4828 /* Do the actual task switch. */
4829 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT, (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip, fFlags, uErr, uCr2, SelTSS, &DescTSS);
4830 }
4831
4832 /* A null CS is bad. */
4833 RTSEL NewCS = Idte.Gate.u16Sel;
4834 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4835 {
4836 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4837 return iemRaiseGeneralProtectionFault0(pVCpu);
4838 }
4839
4840 /* Fetch the descriptor for the new CS. */
4841 IEMSELDESC DescCS;
4842 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4843 if (rcStrict != VINF_SUCCESS)
4844 {
4845 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4846 return rcStrict;
4847 }
4848
4849 /* Must be a code segment. */
4850 if (!DescCS.Legacy.Gen.u1DescType)
4851 {
4852 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4853 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4854 }
4855 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4856 {
4857 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4858 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4859 }
4860
4861 /* Don't allow lowering the privilege level. */
4862 /** @todo Does the lowering of privileges apply to software interrupts
4863 * only? This has bearings on the more-privileged or
4864 * same-privilege stack behavior further down. A testcase would
4865 * be nice. */
4866 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4867 {
4868 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4869 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4870 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4871 }
4872
4873 /* Make sure the selector is present. */
4874 if (!DescCS.Legacy.Gen.u1Present)
4875 {
4876 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4877 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4878 }
4879
4880 /* Check the new EIP against the new CS limit. */
4881 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4882 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4883 ? Idte.Gate.u16OffsetLow
4884 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4885 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4886 if (uNewEip > cbLimitCS)
4887 {
4888 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4889 u8Vector, uNewEip, cbLimitCS, NewCS));
4890 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4891 }
4892 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4893
4894 /* Calc the flag image to push. */
4895 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4896 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4897 fEfl &= ~X86_EFL_RF;
4898 else
4899 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4900
4901 /* From V8086 mode only go to CPL 0. */
4902 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4903 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4904 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4905 {
4906 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4907 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4908 }
4909
4910 /*
4911 * If the privilege level changes, we need to get a new stack from the TSS.
4912 * This in turns means validating the new SS and ESP...
4913 */
4914 if (uNewCpl != pVCpu->iem.s.uCpl)
4915 {
4916 RTSEL NewSS;
4917 uint32_t uNewEsp;
4918 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4919 if (rcStrict != VINF_SUCCESS)
4920 return rcStrict;
4921
4922 IEMSELDESC DescSS;
4923 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4924 if (rcStrict != VINF_SUCCESS)
4925 return rcStrict;
4926 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4927 if (!DescSS.Legacy.Gen.u1DefBig)
4928 {
4929 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4930 uNewEsp = (uint16_t)uNewEsp;
4931 }
4932
4933 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4934
4935 /* Check that there is sufficient space for the stack frame. */
4936 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4937 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4938 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4939 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
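/* The frame is EIP, CS, EFLAGS, ESP and SS (5 entries, plus one for any error code); a V86-mode
   interruption additionally pushes ES, DS, FS and GS. Entries are 2 or 4 bytes depending on the gate. */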
4940
4941 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4942 {
4943 if ( uNewEsp - 1 > cbLimitSS
4944 || uNewEsp < cbStackFrame)
4945 {
4946 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4947 u8Vector, NewSS, uNewEsp, cbStackFrame));
4948 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4949 }
4950 }
4951 else
4952 {
4953 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4954 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4955 {
4956 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4957 u8Vector, NewSS, uNewEsp, cbStackFrame));
4958 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4959 }
4960 }
4961
4962 /*
4963 * Start making changes.
4964 */
4965
4966 /* Set the new CPL so that stack accesses use it. */
4967 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4968 pVCpu->iem.s.uCpl = uNewCpl;
4969
4970 /* Create the stack frame. */
4971 RTPTRUNION uStackFrame;
4972 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4973 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4974 if (rcStrict != VINF_SUCCESS)
4975 return rcStrict;
4976 void * const pvStackFrame = uStackFrame.pv;
4977 if (f32BitGate)
4978 {
4979 if (fFlags & IEM_XCPT_FLAGS_ERR)
4980 *uStackFrame.pu32++ = uErr;
4981 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4982 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4983 uStackFrame.pu32[2] = fEfl;
4984 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4985 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4986 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4987 if (fEfl & X86_EFL_VM)
4988 {
4989 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4990 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4991 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4992 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
4993 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
4994 }
4995 }
4996 else
4997 {
4998 if (fFlags & IEM_XCPT_FLAGS_ERR)
4999 *uStackFrame.pu16++ = uErr;
5000 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5001 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5002 uStackFrame.pu16[2] = fEfl;
5003 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5004 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5005 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5006 if (fEfl & X86_EFL_VM)
5007 {
5008 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5009 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5010 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5011 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5012 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5013 }
5014 }
5015 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5016 if (rcStrict != VINF_SUCCESS)
5017 return rcStrict;
5018
5019 /* Mark the selectors 'accessed' (hope this is the correct time). */
5020 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5021 * after pushing the stack frame? (Write protect the gdt + stack to
5022 * find out.) */
5023 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5024 {
5025 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5026 if (rcStrict != VINF_SUCCESS)
5027 return rcStrict;
5028 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5029 }
5030
5031 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5032 {
5033 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5034 if (rcStrict != VINF_SUCCESS)
5035 return rcStrict;
5036 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5037 }
5038
5039 /*
5040 * Start committing the register changes (joins with the DPL=CPL branch).
5041 */
5042 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5043 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5044 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5045 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5046 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5047 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5048 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5049 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5050 * SP is loaded).
5051 * Need to check the other combinations too:
5052 * - 16-bit TSS, 32-bit handler
5053 * - 32-bit TSS, 16-bit handler */
5054 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5055 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5056 else
5057 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5058
5059 if (fEfl & X86_EFL_VM)
5060 {
5061 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5062 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5063 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5064 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5065 }
5066 }
5067 /*
5068 * Same privilege, no stack change and smaller stack frame.
5069 */
5070 else
5071 {
5072 uint64_t uNewRsp;
5073 RTPTRUNION uStackFrame;
5074 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
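/* Same-privilege frame: EIP, CS and EFLAGS (plus any error code), each entry 2 or 4 bytes depending on the gate. */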
5075 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5076 if (rcStrict != VINF_SUCCESS)
5077 return rcStrict;
5078 void * const pvStackFrame = uStackFrame.pv;
5079
5080 if (f32BitGate)
5081 {
5082 if (fFlags & IEM_XCPT_FLAGS_ERR)
5083 *uStackFrame.pu32++ = uErr;
5084 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5085 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5086 uStackFrame.pu32[2] = fEfl;
5087 }
5088 else
5089 {
5090 if (fFlags & IEM_XCPT_FLAGS_ERR)
5091 *uStackFrame.pu16++ = uErr;
5092 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5093 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5094 uStackFrame.pu16[2] = fEfl;
5095 }
5096 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5097 if (rcStrict != VINF_SUCCESS)
5098 return rcStrict;
5099
5100 /* Mark the CS selector as 'accessed'. */
5101 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5102 {
5103 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5104 if (rcStrict != VINF_SUCCESS)
5105 return rcStrict;
5106 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5107 }
5108
5109 /*
5110 * Start committing the register changes (joins with the other branch).
5111 */
5112 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5113 }
5114
5115 /* ... register committing continues. */
5116 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5117 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5118 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5119 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5120 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5121 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5122
5123 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5124 fEfl &= ~fEflToClear;
5125 IEMMISC_SET_EFL(pVCpu, fEfl);
5126
5127 if (fFlags & IEM_XCPT_FLAGS_CR2)
5128 pVCpu->cpum.GstCtx.cr2 = uCr2;
5129
5130 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5131 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5132
5133 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5134}
5135
5136
5137/**
5138 * Implements exceptions and interrupts for long mode.
5139 *
5140 * @returns VBox strict status code.
5141 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5142 * @param cbInstr The number of bytes to offset rIP by in the return
5143 * address.
5144 * @param u8Vector The interrupt / exception vector number.
5145 * @param fFlags The flags.
5146 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5147 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5148 */
5149IEM_STATIC VBOXSTRICTRC
5150iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5151 uint8_t cbInstr,
5152 uint8_t u8Vector,
5153 uint32_t fFlags,
5154 uint16_t uErr,
5155 uint64_t uCr2)
5156{
5157 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5158
5159 /*
5160 * Read the IDT entry.
5161 */
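/* Long mode IDT entries are 16-byte gate descriptors, hence the scaling by 16 and the two qword fetches below. */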
5162 uint16_t offIdt = (uint16_t)u8Vector << 4;
5163 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5164 {
5165 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5166 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5167 }
5168 X86DESC64 Idte;
5169 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5170 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5171 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5172 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5173 {
5174 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5175 return rcStrict;
5176 }
5177 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5178 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5179 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5180
5181 /*
5182 * Check the descriptor type, DPL and such.
5183 * ASSUMES this is done in the same order as described for call-gate calls.
5184 */
5185 if (Idte.Gate.u1DescType)
5186 {
5187 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5188 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5189 }
5190 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5191 switch (Idte.Gate.u4Type)
5192 {
5193 case AMD64_SEL_TYPE_SYS_INT_GATE:
5194 fEflToClear |= X86_EFL_IF;
5195 break;
5196 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5197 break;
5198
5199 default:
5200 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5201 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5202 }
5203
5204 /* Check DPL against CPL if applicable. */
5205 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5206 {
5207 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5208 {
5209 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5210 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5211 }
5212 }
5213
5214 /* Is it there? */
5215 if (!Idte.Gate.u1Present)
5216 {
5217 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5218 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5219 }
5220
5221 /* A null CS is bad. */
5222 RTSEL NewCS = Idte.Gate.u16Sel;
5223 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5224 {
5225 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5226 return iemRaiseGeneralProtectionFault0(pVCpu);
5227 }
5228
5229 /* Fetch the descriptor for the new CS. */
5230 IEMSELDESC DescCS;
5231 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5232 if (rcStrict != VINF_SUCCESS)
5233 {
5234 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5235 return rcStrict;
5236 }
5237
5238 /* Must be a 64-bit code segment. */
5239 if (!DescCS.Long.Gen.u1DescType)
5240 {
5241 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5242 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5243 }
5244 if ( !DescCS.Long.Gen.u1Long
5245 || DescCS.Long.Gen.u1DefBig
5246 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5247 {
5248 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5249 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5250 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5251 }
5252
5253 /* Don't allow lowering the privilege level. For non-conforming CS
5254 selectors, the CS.DPL sets the privilege level the trap/interrupt
5255 handler runs at. For conforming CS selectors, the CPL remains
5256 unchanged, but the CS.DPL must be <= CPL. */
5257 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5258 * when CPU in Ring-0. Result \#GP? */
5259 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5260 {
5261 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5262 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5263 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5264 }
5265
5266
5267 /* Make sure the selector is present. */
5268 if (!DescCS.Legacy.Gen.u1Present)
5269 {
5270 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5271 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5272 }
5273
5274 /* Check that the new RIP is canonical. */
5275 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5276 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5277 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5278 if (!IEM_IS_CANONICAL(uNewRip))
5279 {
5280 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5281 return iemRaiseGeneralProtectionFault0(pVCpu);
5282 }
5283
5284 /*
5285 * If the privilege level changes or if the IST isn't zero, we need to get
5286 * a new stack from the TSS.
5287 */
5288 uint64_t uNewRsp;
5289 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5290 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5291 if ( uNewCpl != pVCpu->iem.s.uCpl
5292 || Idte.Gate.u3IST != 0)
5293 {
5294 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5295 if (rcStrict != VINF_SUCCESS)
5296 return rcStrict;
5297 }
5298 else
5299 uNewRsp = pVCpu->cpum.GstCtx.rsp;
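/* In 64-bit mode the CPU aligns the stack pointer down to a 16-byte boundary before pushing the frame. */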
5300 uNewRsp &= ~(uint64_t)0xf;
5301
5302 /*
5303 * Calc the flag image to push.
5304 */
5305 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5306 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5307 fEfl &= ~X86_EFL_RF;
5308 else
5309 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5310
5311 /*
5312 * Start making changes.
5313 */
5314 /* Set the new CPL so that stack accesses use it. */
5315 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5316 pVCpu->iem.s.uCpl = uNewCpl;
5317
5318 /* Create the stack frame. */
5319 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
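/* The 64-bit frame holds RIP, CS, RFLAGS, RSP and SS (five qwords); an error code, when present, occupies a sixth slot at the lowest address. */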
5320 RTPTRUNION uStackFrame;
5321 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5322 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5323 if (rcStrict != VINF_SUCCESS)
5324 return rcStrict;
5325 void * const pvStackFrame = uStackFrame.pv;
5326
5327 if (fFlags & IEM_XCPT_FLAGS_ERR)
5328 *uStackFrame.pu64++ = uErr;
5329 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5330 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5331 uStackFrame.pu64[2] = fEfl;
5332 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5333 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5334 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5335 if (rcStrict != VINF_SUCCESS)
5336 return rcStrict;
5337
5338 /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5339 /** @todo testcase: exactly _when_ are the accessed bits set - before or
5340 * after pushing the stack frame? (Write protect the gdt + stack to
5341 * find out.) */
5342 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5343 {
5344 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5345 if (rcStrict != VINF_SUCCESS)
5346 return rcStrict;
5347 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5348 }
5349
5350 /*
5351 * Start committing the register changes.
5352 */
5353 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5354 * hidden registers when interrupting 32-bit or 16-bit code! */
5355 if (uNewCpl != uOldCpl)
5356 {
5357 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5358 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5359 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5360 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5361 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5362 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5363 }
5364 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5365 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5366 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5367 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5368 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5369 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5370 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5371 pVCpu->cpum.GstCtx.rip = uNewRip;
5372
5373 fEfl &= ~fEflToClear;
5374 IEMMISC_SET_EFL(pVCpu, fEfl);
5375
5376 if (fFlags & IEM_XCPT_FLAGS_CR2)
5377 pVCpu->cpum.GstCtx.cr2 = uCr2;
5378
5379 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5380 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5381
5382 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5383}
5384
5385
5386/**
5387 * Implements exceptions and interrupts.
5388 *
5389 * All exceptions and interrupts go thru this function!
5390 *
5391 * @returns VBox strict status code.
5392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5393 * @param cbInstr The number of bytes to offset rIP by in the return
5394 * address.
5395 * @param u8Vector The interrupt / exception vector number.
5396 * @param fFlags The flags.
5397 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5398 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5399 */
5400DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5401iemRaiseXcptOrInt(PVMCPU pVCpu,
5402 uint8_t cbInstr,
5403 uint8_t u8Vector,
5404 uint32_t fFlags,
5405 uint16_t uErr,
5406 uint64_t uCr2)
5407{
5408 /*
5409 * Get all the state that we might need here.
5410 */
5411 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5412 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5413
5414#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5415 /*
5416 * Flush prefetch buffer
5417 */
5418 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5419#endif
5420
5421 /*
5422 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5423 */
5424 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5425 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5426 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5427 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5428 {
5429 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5430 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5431 u8Vector = X86_XCPT_GP;
5432 uErr = 0;
5433 }
5434#ifdef DBGFTRACE_ENABLED
5435 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5436 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5437 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5438#endif
5439
5440#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5441 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5442 {
5443 /*
5444 * If the event is being injected as part of VMRUN, it isn't subject to event
5445 * intercepts in the nested-guest. However, secondary exceptions that occur
5446 * during injection of any event -are- subject to exception intercepts.
5447 * See AMD spec. 15.20 "Event Injection".
5448 */
5449 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5450 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = 1;
5451 else
5452 {
5453 /*
5454 * Check and handle if the event being raised is intercepted.
5455 */
5456 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5457 if (rcStrict0 != VINF_HM_INTERCEPT_NOT_ACTIVE)
5458 return rcStrict0;
5459 }
5460 }
5461#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5462
5463 /*
5464 * Do recursion accounting.
5465 */
5466 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5467 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5468 if (pVCpu->iem.s.cXcptRecursions == 0)
5469 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5470 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5471 else
5472 {
5473 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5474 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5475 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5476
5477 if (pVCpu->iem.s.cXcptRecursions >= 4)
5478 {
5479#ifdef DEBUG_bird
5480 AssertFailed();
5481#endif
5482 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5483 }
5484
5485 /*
5486 * Evaluate the sequence of recurring events.
5487 */
5488 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5489 NULL /* pXcptRaiseInfo */);
5490 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5491 { /* likely */ }
5492 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5493 {
5494 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5495 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5496 u8Vector = X86_XCPT_DF;
5497 uErr = 0;
5498 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5499 if (IEM_IS_SVM_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5500 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5501 }
5502 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5503 {
5504 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5505 return iemInitiateCpuShutdown(pVCpu);
5506 }
5507 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5508 {
5509 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5510 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5511 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5512 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5513 return VERR_EM_GUEST_CPU_HANG;
5514 }
5515 else
5516 {
5517 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5518 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5519 return VERR_IEM_IPE_9;
5520 }
5521
5522 /*
5523 * The 'EXT' bit is set when an exception occurs during delivery of an external
5524 * event (such as an interrupt or an earlier exception)[1]. The privileged software
5525 * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5526 * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5527 *
5528 * [1] - Intel spec. 6.13 "Error Code"
5529 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5530 * [3] - Intel Instruction reference for INT n.
5531 */
5532 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5533 && (fFlags & IEM_XCPT_FLAGS_ERR)
5534 && u8Vector != X86_XCPT_PF
5535 && u8Vector != X86_XCPT_DF)
5536 {
5537 uErr |= X86_TRAP_ERR_EXTERNAL;
5538 }
5539 }
5540
5541 pVCpu->iem.s.cXcptRecursions++;
5542 pVCpu->iem.s.uCurXcpt = u8Vector;
5543 pVCpu->iem.s.fCurXcpt = fFlags;
5544 pVCpu->iem.s.uCurXcptErr = uErr;
5545 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5546
5547 /*
5548 * Extensive logging.
5549 */
5550#if defined(LOG_ENABLED) && defined(IN_RING3)
5551 if (LogIs3Enabled())
5552 {
5553 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5554 PVM pVM = pVCpu->CTX_SUFF(pVM);
5555 char szRegs[4096];
5556 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5557 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5558 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5559 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5560 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5561 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5562 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5563 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5564 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5565 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5566 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5567 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5568 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5569 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5570 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5571 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5572 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5573 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5574 " efer=%016VR{efer}\n"
5575 " pat=%016VR{pat}\n"
5576 " sf_mask=%016VR{sf_mask}\n"
5577 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5578 " lstar=%016VR{lstar}\n"
5579 " star=%016VR{star} cstar=%016VR{cstar}\n"
5580 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5581 );
5582
5583 char szInstr[256];
5584 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5585 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5586 szInstr, sizeof(szInstr), NULL);
5587 Log3(("%s%s\n", szRegs, szInstr));
5588 }
5589#endif /* LOG_ENABLED */
5590
5591 /*
5592 * Call the mode specific worker function.
5593 */
5594 VBOXSTRICTRC rcStrict;
5595 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5596 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5597 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5598 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5599 else
5600 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5601
5602 /* Flush the prefetch buffer. */
5603#ifdef IEM_WITH_CODE_TLB
5604 pVCpu->iem.s.pbInstrBuf = NULL;
5605#else
5606 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5607#endif
5608
5609 /*
5610 * Unwind.
5611 */
5612 pVCpu->iem.s.cXcptRecursions--;
5613 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5614 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5615 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5616 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5617 pVCpu->iem.s.cXcptRecursions + 1));
5618 return rcStrict;
5619}
5620
5621#ifdef IEM_WITH_SETJMP
5622/**
5623 * See iemRaiseXcptOrInt. Will not return.
5624 */
5625IEM_STATIC DECL_NO_RETURN(void)
5626iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5627 uint8_t cbInstr,
5628 uint8_t u8Vector,
5629 uint32_t fFlags,
5630 uint16_t uErr,
5631 uint64_t uCr2)
5632{
5633 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5634 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5635}
5636#endif
5637
5638
5639/** \#DE - 00. */
5640DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5641{
5642 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5643}
5644
5645
5646/** \#DB - 01.
5647 * @note This automatically clears DR7.GD. */
5648DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5649{
5650 /** @todo set/clear RF. */
5651 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5652 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5653}
5654
5655
5656/** \#BR - 05. */
5657DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5658{
5659 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5660}
5661
5662
5663/** \#UD - 06. */
5664DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5665{
5666 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5667}
5668
5669
5670/** \#NM - 07. */
5671DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5672{
5673 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5674}
5675
5676
5677/** \#TS(err) - 0a. */
5678DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5679{
5680 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5681}
5682
5683
5684/** \#TS(tr) - 0a. */
5685DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5686{
5687 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5688 pVCpu->cpum.GstCtx.tr.Sel, 0);
5689}
5690
5691
5692/** \#TS(0) - 0a. */
5693DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5694{
5695 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5696 0, 0);
5697}
5698
5699
5700 /** \#TS(sel) - 0a. */
5701DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5702{
5703 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5704 uSel & X86_SEL_MASK_OFF_RPL, 0);
5705}
5706
5707
5708/** \#NP(err) - 0b. */
5709DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5710{
5711 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5712}
5713
5714
5715/** \#NP(sel) - 0b. */
5716DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5717{
5718 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5719 uSel & ~X86_SEL_RPL, 0);
5720}
5721
5722
5723/** \#SS(seg) - 0c. */
5724DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5725{
5726 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5727 uSel & ~X86_SEL_RPL, 0);
5728}
5729
5730
5731/** \#SS(err) - 0c. */
5732DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5733{
5734 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5735}
5736
5737
5738/** \#GP(n) - 0d. */
5739DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5740{
5741 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5742}
5743
5744
5745/** \#GP(0) - 0d. */
5746DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5747{
5748 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5749}
5750
5751#ifdef IEM_WITH_SETJMP
5752/** \#GP(0) - 0d. */
5753DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5754{
5755 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5756}
5757#endif
5758
5759
5760/** \#GP(sel) - 0d. */
5761DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5762{
5763 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5764 Sel & ~X86_SEL_RPL, 0);
5765}
5766
5767
5768/** \#GP(0) - 0d. */
5769DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5770{
5771 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5772}
5773
5774
5775/** \#GP(sel) - 0d. */
5776DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5777{
5778 NOREF(iSegReg); NOREF(fAccess);
5779 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5780 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5781}
5782
5783#ifdef IEM_WITH_SETJMP
5784/** \#GP(sel) - 0d, longjmp. */
5785DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5786{
5787 NOREF(iSegReg); NOREF(fAccess);
5788 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5789 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5790}
5791#endif
5792
5793/** \#GP(sel) - 0d. */
5794DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5795{
5796 NOREF(Sel);
5797 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5798}
5799
5800#ifdef IEM_WITH_SETJMP
5801/** \#GP(sel) - 0d, longjmp. */
5802DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5803{
5804 NOREF(Sel);
5805 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5806}
5807#endif
5808
5809
5810/** \#GP(sel) - 0d. */
5811DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5812{
5813 NOREF(iSegReg); NOREF(fAccess);
5814 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5815}
5816
5817#ifdef IEM_WITH_SETJMP
5818/** \#GP(sel) - 0d, longjmp. */
5819DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5820 uint32_t fAccess)
5821{
5822 NOREF(iSegReg); NOREF(fAccess);
5823 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5824}
5825#endif
5826
5827
5828/** \#PF(n) - 0e. */
5829DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5830{
5831 uint16_t uErr;
5832 switch (rc)
5833 {
5834 case VERR_PAGE_NOT_PRESENT:
5835 case VERR_PAGE_TABLE_NOT_PRESENT:
5836 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5837 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5838 uErr = 0;
5839 break;
5840
5841 default:
5842 AssertMsgFailed(("%Rrc\n", rc));
5843 RT_FALL_THRU();
5844 case VERR_ACCESS_DENIED:
5845 uErr = X86_TRAP_PF_P;
5846 break;
5847
5848 /** @todo reserved */
5849 }
5850
5851 if (pVCpu->iem.s.uCpl == 3)
5852 uErr |= X86_TRAP_PF_US;
5853
5854 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5855 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5856 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5857 uErr |= X86_TRAP_PF_ID;
5858
5859#if 0 /* This is so much non-sense, really. Why was it done like that? */
5860 /* Note! RW access callers reporting a WRITE protection fault, will clear
5861 the READ flag before calling. So, read-modify-write accesses (RW)
5862 can safely be reported as READ faults. */
5863 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5864 uErr |= X86_TRAP_PF_RW;
5865#else
5866 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5867 {
5868 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5869 uErr |= X86_TRAP_PF_RW;
5870 }
5871#endif
5872
5873 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5874 uErr, GCPtrWhere);
5875}
5876
5877#ifdef IEM_WITH_SETJMP
5878/** \#PF(n) - 0e, longjmp. */
5879IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5880{
5881 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5882}
5883#endif
5884
5885
5886/** \#MF(0) - 10. */
5887DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5888{
5889 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5890}
5891
5892
5893/** \#AC(0) - 11. */
5894DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5895{
5896 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5897}
5898
5899
5900/**
5901 * Macro for calling iemCImplRaiseDivideError().
5902 *
5903 * This enables us to add/remove arguments and force different levels of
5904 * inlining as we wish.
5905 *
5906 * @return Strict VBox status code.
5907 */
5908#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5909IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5910{
5911 NOREF(cbInstr);
5912 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5913}
5914
5915
5916/**
5917 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5918 *
5919 * This enables us to add/remove arguments and force different levels of
5920 * inlining as we wish.
5921 *
5922 * @return Strict VBox status code.
5923 */
5924#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5925IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5926{
5927 NOREF(cbInstr);
5928 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5929}
5930
5931
5932/**
5933 * Macro for calling iemCImplRaiseInvalidOpcode().
5934 *
5935 * This enables us to add/remove arguments and force different levels of
5936 * inlining as we wish.
5937 *
5938 * @return Strict VBox status code.
5939 */
5940#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5941IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5942{
5943 NOREF(cbInstr);
5944 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5945}
5946
5947
5948/** @} */
5949
5950
5951/*
5952 *
5953 * Helper routines.
5954 * Helper routines.
5955 * Helper routines.
5956 *
5957 */
5958
5959/**
5960 * Recalculates the effective operand size.
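* For example, an operand size (0x66) prefix toggles between 16-bit and 32-bit code, while in
* 64-bit mode REX.W forces a 64-bit operand size and takes precedence over 0x66.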
5961 *
5962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5963 */
5964IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
5965{
5966 switch (pVCpu->iem.s.enmCpuMode)
5967 {
5968 case IEMMODE_16BIT:
5969 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
5970 break;
5971 case IEMMODE_32BIT:
5972 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
5973 break;
5974 case IEMMODE_64BIT:
5975 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
5976 {
5977 case 0:
5978 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
5979 break;
5980 case IEM_OP_PRF_SIZE_OP:
5981 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
5982 break;
5983 case IEM_OP_PRF_SIZE_REX_W:
5984 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
5985 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
5986 break;
5987 }
5988 break;
5989 default:
5990 AssertFailed();
5991 }
5992}
5993
5994
5995/**
5996 * Sets the default operand size to 64-bit and recalculates the effective
5997 * operand size.
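* (In long mode, instructions such as near CALL/JMP, PUSH/POP and RET default to a 64-bit
* operand size; only a 0x66 prefix without REX.W drops it to 16-bit, as the code below reflects.)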
5998 *
5999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6000 */
6001IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6002{
6003 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6004 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6005 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6006 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6007 else
6008 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6009}
6010
6011
6012/*
6013 *
6014 * Common opcode decoders.
6015 * Common opcode decoders.
6016 * Common opcode decoders.
6017 *
6018 */
6019//#include <iprt/mem.h>
6020
6021/**
6022 * Used to add extra details about a stub case.
6023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6024 */
6025IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6026{
6027#if defined(LOG_ENABLED) && defined(IN_RING3)
6028 PVM pVM = pVCpu->CTX_SUFF(pVM);
6029 char szRegs[4096];
6030 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6031 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6032 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6033 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6034 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6035 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6036 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6037 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6038 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6039 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6040 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6041 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6042 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6043 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6044 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6045 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6046 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6047 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6048 " efer=%016VR{efer}\n"
6049 " pat=%016VR{pat}\n"
6050 " sf_mask=%016VR{sf_mask}\n"
6051 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6052 " lstar=%016VR{lstar}\n"
6053 " star=%016VR{star} cstar=%016VR{cstar}\n"
6054 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6055 );
6056
6057 char szInstr[256];
6058 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6059 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6060 szInstr, sizeof(szInstr), NULL);
6061
6062 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6063#else
6064    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6065#endif
6066}
6067
6068/**
6069 * Complains about a stub.
6070 *
6071 * Two versions of this macro are provided: one for daily use and one for use
6072 * when working on IEM.
6073 */
6074#if 0
6075# define IEMOP_BITCH_ABOUT_STUB() \
6076 do { \
6077 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6078 iemOpStubMsg2(pVCpu); \
6079 RTAssertPanic(); \
6080 } while (0)
6081#else
6082# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6083#endif
6084
6085/** Stubs an opcode. */
6086#define FNIEMOP_STUB(a_Name) \
6087 FNIEMOP_DEF(a_Name) \
6088 { \
6089 RT_NOREF_PV(pVCpu); \
6090 IEMOP_BITCH_ABOUT_STUB(); \
6091 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6092 } \
6093 typedef int ignore_semicolon
6094
6095/** Stubs an opcode. */
6096#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6097 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6098 { \
6099 RT_NOREF_PV(pVCpu); \
6100 RT_NOREF_PV(a_Name0); \
6101 IEMOP_BITCH_ABOUT_STUB(); \
6102 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6103 } \
6104 typedef int ignore_semicolon
6105
6106/** Stubs an opcode which currently should raise \#UD. */
6107#define FNIEMOP_UD_STUB(a_Name) \
6108 FNIEMOP_DEF(a_Name) \
6109 { \
6110 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6111 return IEMOP_RAISE_INVALID_OPCODE(); \
6112 } \
6113 typedef int ignore_semicolon
6114
6115/** Stubs an opcode which currently should raise \#UD. */
6116#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6117 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6118 { \
6119 RT_NOREF_PV(pVCpu); \
6120 RT_NOREF_PV(a_Name0); \
6121 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6122 return IEMOP_RAISE_INVALID_OPCODE(); \
6123 } \
6124 typedef int ignore_semicolon
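
/*
 * Usage sketch for the stub macros above (the opcode names are made up for
 * illustration; real entries live in the included instruction decoder
 * templates).  FNIEMOP_STUB yields a function that logs/asserts and returns
 * VERR_IEM_INSTR_NOT_IMPLEMENTED, while FNIEMOP_UD_STUB raises \#UD.
 */
FNIEMOP_STUB(iemOp_example_not_implemented);
FNIEMOP_UD_STUB(iemOp_example_raises_ud);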
6125
6126
6127
6128/** @name Register Access.
6129 * @{
6130 */
6131
6132/**
6133 * Gets a reference (pointer) to the specified hidden segment register.
6134 *
6135 * @returns Hidden register reference.
6136 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6137 * @param iSegReg The segment register.
6138 */
6139IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6140{
6141 Assert(iSegReg < X86_SREG_COUNT);
6142 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6143 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6144
6145#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6146 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6147 { /* likely */ }
6148 else
6149 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6150#else
6151 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6152#endif
6153 return pSReg;
6154}
6155
6156
6157/**
6158 * Ensures that the given hidden segment register is up to date.
6159 *
6160 * @returns Hidden register reference.
6161 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6162 * @param pSReg The segment register.
6163 */
6164IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6165{
6166#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6167 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6168 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6169#else
6170 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6171 NOREF(pVCpu);
6172#endif
6173 return pSReg;
6174}
6175
6176
6177/**
6178 * Gets a reference (pointer) to the specified segment register (the selector
6179 * value).
6180 *
6181 * @returns Pointer to the selector variable.
6182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6183 * @param iSegReg The segment register.
6184 */
6185DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6186{
6187 Assert(iSegReg < X86_SREG_COUNT);
6188 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6189 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6190}
6191
6192
6193/**
6194 * Fetches the selector value of a segment register.
6195 *
6196 * @returns The selector value.
6197 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6198 * @param iSegReg The segment register.
6199 */
6200DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6201{
6202 Assert(iSegReg < X86_SREG_COUNT);
6203 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6204 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6205}
6206
6207
6208/**
6209 * Fetches the base address value of a segment register.
6210 *
6211 * @returns The base address value.
6212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6213 * @param iSegReg The segment register.
6214 */
6215DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6216{
6217 Assert(iSegReg < X86_SREG_COUNT);
6218 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6219 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6220}
6221
6222
6223/**
6224 * Gets a reference (pointer) to the specified general purpose register.
6225 *
6226 * @returns Register reference.
6227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6228 * @param iReg The general purpose register.
6229 */
6230DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6231{
6232 Assert(iReg < 16);
6233 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6234}
6235
6236
6237/**
6238 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6239 *
6240 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6241 *
6242 * @returns Register reference.
6243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6244 * @param iReg The register.
6245 */
6246DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6247{
6248 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6249 {
6250 Assert(iReg < 16);
6251 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6252 }
6253 /* high 8-bit register. */
6254 Assert(iReg < 8);
6255 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6256}
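
/*
 * Worked example (illustrative only; iemExampleHighByteVsSpl is hypothetical):
 * register number 4 is ambiguous between AH and SPL.  Without a REX prefix
 * the helper above hands back the high byte of register 0 (AH); with any REX
 * prefix present it hands back the low byte of register 4 (SPL).
 */
IEM_STATIC void iemExampleHighByteVsSpl(PVMCPU pVCpu)
{
    uint8_t *pbReg = iemGRegRefU8(pVCpu, 4);
    if (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)
        Assert(pbReg == &pVCpu->cpum.GstCtx.aGRegs[4].u8);  /* SPL */
    else
        Assert(pbReg == &pVCpu->cpum.GstCtx.aGRegs[0].bHi); /* AH  */
}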
6257
6258
6259/**
6260 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6261 *
6262 * @returns Register reference.
6263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6264 * @param iReg The register.
6265 */
6266DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6267{
6268 Assert(iReg < 16);
6269 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6270}
6271
6272
6273/**
6274 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6275 *
6276 * @returns Register reference.
6277 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6278 * @param iReg The register.
6279 */
6280DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6281{
6282 Assert(iReg < 16);
6283 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6284}
6285
6286
6287/**
6288 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6289 *
6290 * @returns Register reference.
6291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6292 * @param iReg The register.
6293 */
6294DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6295{
6296    Assert(iReg < 16);
6297 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6298}
6299
6300
6301/**
6302 * Gets a reference (pointer) to the specified segment register's base address.
6303 *
6304 * @returns Segment register base address reference.
6305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6306 * @param iSegReg The segment selector.
6307 */
6308DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6309{
6310 Assert(iSegReg < X86_SREG_COUNT);
6311 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6312 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6313}
6314
6315
6316/**
6317 * Fetches the value of an 8-bit general purpose register.
6318 *
6319 * @returns The register value.
6320 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6321 * @param iReg The register.
6322 */
6323DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6324{
6325 return *iemGRegRefU8(pVCpu, iReg);
6326}
6327
6328
6329/**
6330 * Fetches the value of a 16-bit general purpose register.
6331 *
6332 * @returns The register value.
6333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6334 * @param iReg The register.
6335 */
6336DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6337{
6338 Assert(iReg < 16);
6339 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6340}
6341
6342
6343/**
6344 * Fetches the value of a 32-bit general purpose register.
6345 *
6346 * @returns The register value.
6347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6348 * @param iReg The register.
6349 */
6350DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6351{
6352 Assert(iReg < 16);
6353 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6354}
6355
6356
6357/**
6358 * Fetches the value of a 64-bit general purpose register.
6359 *
6360 * @returns The register value.
6361 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6362 * @param iReg The register.
6363 */
6364DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6365{
6366 Assert(iReg < 16);
6367 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6368}
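
/*
 * Illustrative sketch (hypothetical helper, not an IEM primitive): combining
 * the segment base and general purpose register fetchers above to form a
 * flat address for an [fs:rax] style operand in 64-bit mode, where only the
 * FS/GS bases contribute and no limit checking applies.
 */
IEM_STATIC uint64_t iemExampleFlatAddrFsRax(PVMCPU pVCpu)
{
    uint64_t const uSegBase = iemSRegBaseFetchU64(pVCpu, X86_SREG_FS);
    uint64_t const uIndex   = iemGRegFetchU64(pVCpu, 0 /* rAX */);
    return uSegBase + uIndex;
}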
6369
6370
6371/**
6372 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6373 *
6374 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6375 * segment limit.
6376 *
6377 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6378 * @param offNextInstr The offset of the next instruction.
6379 */
6380IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6381{
6382 switch (pVCpu->iem.s.enmEffOpSize)
6383 {
6384 case IEMMODE_16BIT:
6385 {
6386 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6387 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6388 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6389 return iemRaiseGeneralProtectionFault0(pVCpu);
6390 pVCpu->cpum.GstCtx.rip = uNewIp;
6391 break;
6392 }
6393
6394 case IEMMODE_32BIT:
6395 {
6396 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6397 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6398
6399 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6400 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6401 return iemRaiseGeneralProtectionFault0(pVCpu);
6402 pVCpu->cpum.GstCtx.rip = uNewEip;
6403 break;
6404 }
6405
6406 case IEMMODE_64BIT:
6407 {
6408 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6409
6410 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6411 if (!IEM_IS_CANONICAL(uNewRip))
6412 return iemRaiseGeneralProtectionFault0(pVCpu);
6413 pVCpu->cpum.GstCtx.rip = uNewRip;
6414 break;
6415 }
6416
6417 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6418 }
6419
6420 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6421
6422#ifndef IEM_WITH_CODE_TLB
6423 /* Flush the prefetch buffer. */
6424 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6425#endif
6426
6427 return VINF_SUCCESS;
6428}
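
/*
 * Worked example of the 16-bit arithmetic above (numbers are illustrative):
 * IP=0x1230 and a two byte "JMP short -4" (EB FC) gives
 * new IP = 0x1230 + 2 (instruction length) + (-4) = 0x122e, with any
 * wrap-around handled by uNewIp being a uint16_t.
 */
IEM_STATIC void iemExampleJmpShortMath(void)
{
    uint16_t const uIp     = UINT16_C(0x1230);
    int8_t const   offRel  = -4;                    /* signed displacement */
    uint8_t const  cbInstr = 2;                     /* EB FC */
    uint16_t const uNewIp  = (uint16_t)(uIp + offRel + cbInstr);
    Assert(uNewIp == UINT16_C(0x122e));
}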
6429
6430
6431/**
6432 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6433 *
6434 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6435 * segment limit.
6436 *
6437 * @returns Strict VBox status code.
6438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6439 * @param offNextInstr The offset of the next instruction.
6440 */
6441IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6442{
6443 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6444
6445 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6446 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6447 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6448 return iemRaiseGeneralProtectionFault0(pVCpu);
6449 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6450 pVCpu->cpum.GstCtx.rip = uNewIp;
6451 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6452
6453#ifndef IEM_WITH_CODE_TLB
6454 /* Flush the prefetch buffer. */
6455 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6456#endif
6457
6458 return VINF_SUCCESS;
6459}
6460
6461
6462/**
6463 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6464 *
6465 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6466 * segment limit.
6467 *
6468 * @returns Strict VBox status code.
6469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6470 * @param offNextInstr The offset of the next instruction.
6471 */
6472IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6473{
6474 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6475
6476 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6477 {
6478 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6479
6480 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6481 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6482 return iemRaiseGeneralProtectionFault0(pVCpu);
6483 pVCpu->cpum.GstCtx.rip = uNewEip;
6484 }
6485 else
6486 {
6487 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6488
6489 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6490 if (!IEM_IS_CANONICAL(uNewRip))
6491 return iemRaiseGeneralProtectionFault0(pVCpu);
6492 pVCpu->cpum.GstCtx.rip = uNewRip;
6493 }
6494 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6495
6496#ifndef IEM_WITH_CODE_TLB
6497 /* Flush the prefetch buffer. */
6498 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6499#endif
6500
6501 return VINF_SUCCESS;
6502}
6503
6504
6505/**
6506 * Performs a near jump to the specified address.
6507 *
6508 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6509 * segment limit.
6510 *
6511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6512 * @param uNewRip The new RIP value.
6513 */
6514IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6515{
6516 switch (pVCpu->iem.s.enmEffOpSize)
6517 {
6518 case IEMMODE_16BIT:
6519 {
6520 Assert(uNewRip <= UINT16_MAX);
6521 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6522 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6523 return iemRaiseGeneralProtectionFault0(pVCpu);
6524 /** @todo Test 16-bit jump in 64-bit mode. */
6525 pVCpu->cpum.GstCtx.rip = uNewRip;
6526 break;
6527 }
6528
6529 case IEMMODE_32BIT:
6530 {
6531 Assert(uNewRip <= UINT32_MAX);
6532 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6533 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6534
6535 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6536 return iemRaiseGeneralProtectionFault0(pVCpu);
6537 pVCpu->cpum.GstCtx.rip = uNewRip;
6538 break;
6539 }
6540
6541 case IEMMODE_64BIT:
6542 {
6543 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6544
6545 if (!IEM_IS_CANONICAL(uNewRip))
6546 return iemRaiseGeneralProtectionFault0(pVCpu);
6547 pVCpu->cpum.GstCtx.rip = uNewRip;
6548 break;
6549 }
6550
6551 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6552 }
6553
6554 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6555
6556#ifndef IEM_WITH_CODE_TLB
6557 /* Flush the prefetch buffer. */
6558 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6559#endif
6560
6561 return VINF_SUCCESS;
6562}
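
/*
 * Usage sketch (hypothetical decoder body, illustrative only): a 64-bit mode
 * "JMP rAX" style handler would fetch the target from the register file and
 * let iemRegRipJump apply the canonical check above.
 */
IEM_STATIC VBOXSTRICTRC iemExampleJmpViaRax(PVMCPU pVCpu)
{
    uint64_t const uNewRip = iemGRegFetchU64(pVCpu, 0 /* rAX */);
    return iemRegRipJump(pVCpu, uNewRip);
}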
6563
6564
6565/**
6566 * Gets the address of the top of the stack.
6567 *
6568 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6569 */
6570DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6571{
6572 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6573 return pVCpu->cpum.GstCtx.rsp;
6574 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6575 return pVCpu->cpum.GstCtx.esp;
6576 return pVCpu->cpum.GstCtx.sp;
6577}
6578
6579
6580/**
6581 * Updates the RIP/EIP/IP to point to the next instruction.
6582 *
6583 * This function leaves the EFLAGS.RF flag alone.
6584 *
6585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6586 * @param cbInstr The number of bytes to add.
6587 */
6588IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6589{
6590 switch (pVCpu->iem.s.enmCpuMode)
6591 {
6592 case IEMMODE_16BIT:
6593 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6594 pVCpu->cpum.GstCtx.eip += cbInstr;
6595 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6596 break;
6597
6598 case IEMMODE_32BIT:
6599 pVCpu->cpum.GstCtx.eip += cbInstr;
6600 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6601 break;
6602
6603 case IEMMODE_64BIT:
6604 pVCpu->cpum.GstCtx.rip += cbInstr;
6605 break;
6606 default: AssertFailed();
6607 }
6608}
6609
6610
6611#if 0
6612/**
6613 * Updates the RIP/EIP/IP to point to the next instruction.
6614 *
6615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6616 */
6617IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6618{
6619 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6620}
6621#endif
6622
6623
6624
6625/**
6626 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6627 *
6628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6629 * @param cbInstr The number of bytes to add.
6630 */
6631IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6632{
6633 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6634
6635 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6636#if ARCH_BITS >= 64
6637 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6638 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6639 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6640#else
6641 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6642 pVCpu->cpum.GstCtx.rip += cbInstr;
6643 else
6644 pVCpu->cpum.GstCtx.eip += cbInstr;
6645#endif
6646}
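
/*
 * Worked example of the mask table above (illustrative only): in 32-bit mode
 * the mask is 0xffffffff, so the EIP addition wraps at 4G while the upper
 * RIP bits stay zero.
 */
IEM_STATIC void iemExampleRipMaskMath(void)
{
    uint64_t const fMask = UINT64_C(0xffffffff);               /* IEMMODE_32BIT entry */
    uint64_t const uRip  = (UINT64_C(0xfffffffe) + 4) & fMask; /* wraps to 2 */
    Assert(uRip == 2);
}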
6647
6648
6649/**
6650 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6651 *
6652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6653 */
6654IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6655{
6656 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6657}
6658
6659
6660/**
6661 * Adds to the stack pointer.
6662 *
6663 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6664 * @param cbToAdd The number of bytes to add (8-bit!).
6665 */
6666DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6667{
6668 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6669 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6670 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6671 pVCpu->cpum.GstCtx.esp += cbToAdd;
6672 else
6673 pVCpu->cpum.GstCtx.sp += cbToAdd;
6674}
6675
6676
6677/**
6678 * Subtracts from the stack pointer.
6679 *
6680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6681 * @param cbToSub The number of bytes to subtract (8-bit!).
6682 */
6683DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6684{
6685 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6686 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6687 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6688 pVCpu->cpum.GstCtx.esp -= cbToSub;
6689 else
6690 pVCpu->cpum.GstCtx.sp -= cbToSub;
6691}
6692
6693
6694/**
6695 * Adds to the temporary stack pointer.
6696 *
6697 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6698 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6699 * @param cbToAdd The number of bytes to add (16-bit).
6700 */
6701DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6702{
6703 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6704 pTmpRsp->u += cbToAdd;
6705 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6706 pTmpRsp->DWords.dw0 += cbToAdd;
6707 else
6708 pTmpRsp->Words.w0 += cbToAdd;
6709}
6710
6711
6712/**
6713 * Subtracts from the temporary stack pointer.
6714 *
6715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6716 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6717 * @param cbToSub The number of bytes to subtract.
6718 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter
6719 *          expects that.
6720 */
6721DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6722{
6723 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6724 pTmpRsp->u -= cbToSub;
6725 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6726 pTmpRsp->DWords.dw0 -= cbToSub;
6727 else
6728 pTmpRsp->Words.w0 -= cbToSub;
6729}
6730
6731
6732/**
6733 * Calculates the effective stack address for a push of the specified size as
6734 * well as the new RSP value (upper bits may be masked).
6735 *
6736 * @returns Effective stack address for the push.
6737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6738 * @param cbItem The size of the stack item to push.
6739 * @param puNewRsp Where to return the new RSP value.
6740 */
6741DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6742{
6743 RTUINT64U uTmpRsp;
6744 RTGCPTR GCPtrTop;
6745 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6746
6747 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6748 GCPtrTop = uTmpRsp.u -= cbItem;
6749 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6750 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6751 else
6752 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6753 *puNewRsp = uTmpRsp.u;
6754 return GCPtrTop;
6755}
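
/*
 * Usage sketch (hypothetical, illustrative only): a PUSH style implementation
 * first asks iemRegGetRspForPush for the store address and the would-be RSP,
 * performs the (possibly faulting) memory write, and only then commits the
 * new RSP.  With RSP=0x7000 and an 8 byte item both values come out as
 * 0x6ff8 in 64-bit mode.
 */
IEM_STATIC void iemExamplePushCommit(PVMCPU pVCpu)
{
    uint64_t      uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
    /* ... write the item to SS:GCPtrTop here; only if that succeeds: ... */
    NOREF(GCPtrTop);
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}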
6756
6757
6758/**
6759 * Gets the current stack pointer and calculates the value after a pop of the
6760 * specified size.
6761 *
6762 * @returns Current stack pointer.
6763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6764 * @param cbItem The size of the stack item to pop.
6765 * @param puNewRsp Where to return the new RSP value.
6766 */
6767DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6768{
6769 RTUINT64U uTmpRsp;
6770 RTGCPTR GCPtrTop;
6771 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6772
6773 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6774 {
6775 GCPtrTop = uTmpRsp.u;
6776 uTmpRsp.u += cbItem;
6777 }
6778 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6779 {
6780 GCPtrTop = uTmpRsp.DWords.dw0;
6781 uTmpRsp.DWords.dw0 += cbItem;
6782 }
6783 else
6784 {
6785 GCPtrTop = uTmpRsp.Words.w0;
6786 uTmpRsp.Words.w0 += cbItem;
6787 }
6788 *puNewRsp = uTmpRsp.u;
6789 return GCPtrTop;
6790}
6791
6792
6793/**
6794 * Calculates the effective stack address for a push of the specified size as
6795 * well as the new temporary RSP value (upper bits may be masked).
6796 *
6797 * @returns Effective stack address for the push.
6798 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6799 * @param pTmpRsp The temporary stack pointer. This is updated.
6800 * @param cbItem The size of the stack item to push.
6801 */
6802DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6803{
6804 RTGCPTR GCPtrTop;
6805
6806 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6807 GCPtrTop = pTmpRsp->u -= cbItem;
6808 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6809 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6810 else
6811 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6812 return GCPtrTop;
6813}
6814
6815
6816/**
6817 * Gets the effective stack address for a pop of the specified size and
6818 * calculates and updates the temporary RSP.
6819 *
6820 * @returns Current stack pointer.
6821 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6822 * @param pTmpRsp The temporary stack pointer. This is updated.
6823 * @param cbItem The size of the stack item to pop.
6824 */
6825DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6826{
6827 RTGCPTR GCPtrTop;
6828 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6829 {
6830 GCPtrTop = pTmpRsp->u;
6831 pTmpRsp->u += cbItem;
6832 }
6833 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6834 {
6835 GCPtrTop = pTmpRsp->DWords.dw0;
6836 pTmpRsp->DWords.dw0 += cbItem;
6837 }
6838 else
6839 {
6840 GCPtrTop = pTmpRsp->Words.w0;
6841 pTmpRsp->Words.w0 += cbItem;
6842 }
6843 return GCPtrTop;
6844}
6845
6846/** @} */
6847
6848
6849/** @name FPU access and helpers.
6850 *
6851 * @{
6852 */
6853
6854
6855/**
6856 * Hook for preparing to use the host FPU.
6857 *
6858 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6859 *
6860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6861 */
6862DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6863{
6864#ifdef IN_RING3
6865 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6866#else
6867 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6868#endif
6869 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6870}
6871
6872
6873/**
6874 * Hook for preparing to use the host FPU for SSE.
6875 *
6876 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6877 *
6878 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6879 */
6880DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6881{
6882 iemFpuPrepareUsage(pVCpu);
6883}
6884
6885
6886/**
6887 * Hook for preparing to use the host FPU for AVX.
6888 *
6889 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6890 *
6891 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6892 */
6893DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6894{
6895 iemFpuPrepareUsage(pVCpu);
6896}
6897
6898
6899/**
6900 * Hook for actualizing the guest FPU state before the interpreter reads it.
6901 *
6902 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6903 *
6904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6905 */
6906DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
6907{
6908#ifdef IN_RING3
6909 NOREF(pVCpu);
6910#else
6911 CPUMRZFpuStateActualizeForRead(pVCpu);
6912#endif
6913 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6914}
6915
6916
6917/**
6918 * Hook for actualizing the guest FPU state before the interpreter changes it.
6919 *
6920 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6921 *
6922 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6923 */
6924DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
6925{
6926#ifdef IN_RING3
6927 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6928#else
6929 CPUMRZFpuStateActualizeForChange(pVCpu);
6930#endif
6931 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6932}
6933
6934
6935/**
6936 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6937 * only.
6938 *
6939 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6940 *
6941 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6942 */
6943DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
6944{
6945#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6946 NOREF(pVCpu);
6947#else
6948 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6949#endif
6950 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6951}
6952
6953
6954/**
6955 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6956 * read+write.
6957 *
6958 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6959 *
6960 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6961 */
6962DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
6963{
6964#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6965 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6966#else
6967 CPUMRZFpuStateActualizeForChange(pVCpu);
6968#endif
6969 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6970}
6971
6972
6973/**
6974 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
6975 * only.
6976 *
6977 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6978 *
6979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6980 */
6981DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
6982{
6983#ifdef IN_RING3
6984 NOREF(pVCpu);
6985#else
6986 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
6987#endif
6988 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6989}
6990
6991
6992/**
6993 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
6994 * read+write.
6995 *
6996 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6997 *
6998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6999 */
7000DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7001{
7002#ifdef IN_RING3
7003 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7004#else
7005 CPUMRZFpuStateActualizeForChange(pVCpu);
7006#endif
7007 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7008}
7009
7010
7011/**
7012 * Stores a QNaN value into a FPU register.
7013 *
7014 * @param pReg Pointer to the register.
7015 */
7016DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7017{
7018 pReg->au32[0] = UINT32_C(0x00000000);
7019 pReg->au32[1] = UINT32_C(0xc0000000);
7020 pReg->au16[4] = UINT16_C(0xffff);
7021}
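
/*
 * The pattern written above is the x87 "real indefinite" QNaN: sign=1,
 * exponent=0x7fff, mantissa=0xc000000000000000.  Illustrative check
 * (hypothetical helper, not used by IEM):
 */
IEM_STATIC void iemExampleQNanPattern(void)
{
    RTFLOAT80U r80;
    iemFpuStoreQNan(&r80);
    Assert(r80.s.uExponent   == 0x7fff);
    Assert(r80.s.u64Mantissa == UINT64_C(0xc000000000000000));
}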
7022
7023
7024/**
7025 * Updates the FOP, FPU.CS and FPUIP registers.
7026 *
7027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7028 * @param pFpuCtx The FPU context.
7029 */
7030DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7031{
7032 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7033 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7034    /** @todo x87.CS and FPUIP need to be kept separately. */
7035 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7036 {
7037 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7038 * happens in real mode here based on the fnsave and fnstenv images. */
7039 pFpuCtx->CS = 0;
7040 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7041 }
7042 else
7043 {
7044 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7045 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7046 }
7047}
7048
7049
7050/**
7051 * Updates the x87.DS and FPUDP registers.
7052 *
7053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7054 * @param pFpuCtx The FPU context.
7055 * @param iEffSeg The effective segment register.
7056 * @param GCPtrEff The effective address relative to @a iEffSeg.
7057 */
7058DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7059{
7060 RTSEL sel;
7061 switch (iEffSeg)
7062 {
7063 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7064 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7065 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7066 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7067 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7068 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7069 default:
7070 AssertMsgFailed(("%d\n", iEffSeg));
7071 sel = pVCpu->cpum.GstCtx.ds.Sel;
7072 }
7073    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7074 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7075 {
7076 pFpuCtx->DS = 0;
7077 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7078 }
7079 else
7080 {
7081 pFpuCtx->DS = sel;
7082 pFpuCtx->FPUDP = GCPtrEff;
7083 }
7084}
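
/*
 * Worked example of the real/V86 branch above (illustrative numbers): with
 * a selector of 0x1234 and an effective offset of 0x0010 the saved FPUDP
 * becomes 0x1234 * 16 + 0x10 = 0x12350, which is the layout the code above
 * assumes for real-mode FNSAVE/FNSTENV style images.
 */
IEM_STATIC void iemExampleRealModeFpuDpMath(void)
{
    uint32_t const uSel   = UINT32_C(0x1234);
    uint32_t const offEff = UINT32_C(0x0010);
    uint32_t const uFpuDp = offEff + (uSel << 4);
    Assert(uFpuDp == UINT32_C(0x00012350));
}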
7085
7086
7087/**
7088 * Rotates the stack registers in the push direction.
7089 *
7090 * @param pFpuCtx The FPU context.
7091 * @remarks This is a complete waste of time, but fxsave stores the registers in
7092 * stack order.
7093 */
7094DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7095{
7096 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7097 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7098 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7099 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7100 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7101 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7102 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7103 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7104 pFpuCtx->aRegs[0].r80 = r80Tmp;
7105}
7106
7107
7108/**
7109 * Rotates the stack registers in the pop direction.
7110 *
7111 * @param pFpuCtx The FPU context.
7112 * @remarks This is a complete waste of time, but fxsave stores the registers in
7113 * stack order.
7114 */
7115DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7116{
7117 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7118 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7119 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7120 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7121 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7122 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7123 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7124 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7125 pFpuCtx->aRegs[7].r80 = r80Tmp;
7126}
7127
7128
7129/**
7130 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7131 * exception prevents it.
7132 *
7133 * @param pResult The FPU operation result to push.
7134 * @param pFpuCtx The FPU context.
7135 */
7136IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7137{
7138 /* Update FSW and bail if there are pending exceptions afterwards. */
7139 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7140 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7141 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7142 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7143 {
7144 pFpuCtx->FSW = fFsw;
7145 return;
7146 }
7147
7148 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7149 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7150 {
7151 /* All is fine, push the actual value. */
7152 pFpuCtx->FTW |= RT_BIT(iNewTop);
7153 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7154 }
7155 else if (pFpuCtx->FCW & X86_FCW_IM)
7156 {
7157 /* Masked stack overflow, push QNaN. */
7158 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7159 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7160 }
7161 else
7162 {
7163 /* Raise stack overflow, don't push anything. */
7164 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7165 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7166 return;
7167 }
7168
7169 fFsw &= ~X86_FSW_TOP_MASK;
7170 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7171 pFpuCtx->FSW = fFsw;
7172
7173 iemFpuRotateStackPush(pFpuCtx);
7174}
7175
7176
7177/**
7178 * Stores a result in a FPU register and updates the FSW and FTW.
7179 *
7180 * @param pFpuCtx The FPU context.
7181 * @param pResult The result to store.
7182 * @param iStReg Which FPU register to store it in.
7183 */
7184IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7185{
7186 Assert(iStReg < 8);
7187 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7188 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7189 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7190 pFpuCtx->FTW |= RT_BIT(iReg);
7191 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7192}
7193
7194
7195/**
7196 * Only updates the FPU status word (FSW) with the result of the current
7197 * instruction.
7198 *
7199 * @param pFpuCtx The FPU context.
7200 * @param u16FSW The FSW output of the current instruction.
7201 */
7202IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7203{
7204 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7205 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7206}
7207
7208
7209/**
7210 * Pops one item off the FPU stack if no pending exception prevents it.
7211 *
7212 * @param pFpuCtx The FPU context.
7213 */
7214IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7215{
7216 /* Check pending exceptions. */
7217 uint16_t uFSW = pFpuCtx->FSW;
7218 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7219 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7220 return;
7221
7222    /* TOP++ (adding 9 is the same as adding 1 modulo 8 after masking). */
7223 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7224 uFSW &= ~X86_FSW_TOP_MASK;
7225 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7226 pFpuCtx->FSW = uFSW;
7227
7228 /* Mark the previous ST0 as empty. */
7229 iOldTop >>= X86_FSW_TOP_SHIFT;
7230 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7231
7232 /* Rotate the registers. */
7233 iemFpuRotateStackPop(pFpuCtx);
7234}
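
/*
 * Worked TOP arithmetic for the pop above (illustrative): starting with
 * TOP=3, adding 9 in the TOP field and masking yields TOP=4, i.e. popping
 * increments TOP modulo 8, mirroring the push helpers which subtract one
 * (add 7).
 */
IEM_STATIC void iemExampleFpuPopTopMath(void)
{
    uint16_t const uFswOld = (uint16_t)(3 << X86_FSW_TOP_SHIFT);
    uint16_t const uTopNew = (uint16_t)(  ((uFswOld & X86_FSW_TOP_MASK) + (UINT16_C(9) << X86_FSW_TOP_SHIFT))
                                        & X86_FSW_TOP_MASK);
    Assert(X86_FSW_TOP_GET(uTopNew) == 4);
}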
7235
7236
7237/**
7238 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7239 *
7240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7241 * @param pResult The FPU operation result to push.
7242 */
7243IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7244{
7245 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7246 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7247 iemFpuMaybePushResult(pResult, pFpuCtx);
7248}
7249
7250
7251/**
7252 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7253 * and sets FPUDP and FPUDS.
7254 *
7255 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7256 * @param pResult The FPU operation result to push.
7257 * @param iEffSeg The effective segment register.
7258 * @param GCPtrEff The effective address relative to @a iEffSeg.
7259 */
7260IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7261{
7262 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7263 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7264 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7265 iemFpuMaybePushResult(pResult, pFpuCtx);
7266}
7267
7268
7269/**
7270 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7271 * unless a pending exception prevents it.
7272 *
7273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7274 * @param pResult The FPU operation result to store and push.
7275 */
7276IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7277{
7278 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7279 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7280
7281 /* Update FSW and bail if there are pending exceptions afterwards. */
7282 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7283 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7284 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7285 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7286 {
7287 pFpuCtx->FSW = fFsw;
7288 return;
7289 }
7290
7291 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7292 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7293 {
7294 /* All is fine, push the actual value. */
7295 pFpuCtx->FTW |= RT_BIT(iNewTop);
7296 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7297 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7298 }
7299 else if (pFpuCtx->FCW & X86_FCW_IM)
7300 {
7301 /* Masked stack overflow, push QNaN. */
7302 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7303 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7304 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7305 }
7306 else
7307 {
7308 /* Raise stack overflow, don't push anything. */
7309 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7310 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7311 return;
7312 }
7313
7314 fFsw &= ~X86_FSW_TOP_MASK;
7315 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7316 pFpuCtx->FSW = fFsw;
7317
7318 iemFpuRotateStackPush(pFpuCtx);
7319}
7320
7321
7322/**
7323 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7324 * FOP.
7325 *
7326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7327 * @param pResult The result to store.
7328 * @param iStReg Which FPU register to store it in.
7329 */
7330IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7331{
7332 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7333 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7334 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7335}
7336
7337
7338/**
7339 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7340 * FOP, and then pops the stack.
7341 *
7342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7343 * @param pResult The result to store.
7344 * @param iStReg Which FPU register to store it in.
7345 */
7346IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7347{
7348 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7349 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7350 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7351 iemFpuMaybePopOne(pFpuCtx);
7352}
7353
7354
7355/**
7356 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7357 * FPUDP, and FPUDS.
7358 *
7359 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7360 * @param pResult The result to store.
7361 * @param iStReg Which FPU register to store it in.
7362 * @param iEffSeg The effective memory operand selector register.
7363 * @param GCPtrEff The effective memory operand offset.
7364 */
7365IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7366 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7367{
7368 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7369 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7370 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7371 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7372}
7373
7374
7375/**
7376 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7377 * FPUDP, and FPUDS, and then pops the stack.
7378 *
7379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7380 * @param pResult The result to store.
7381 * @param iStReg Which FPU register to store it in.
7382 * @param iEffSeg The effective memory operand selector register.
7383 * @param GCPtrEff The effective memory operand offset.
7384 */
7385IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7386 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7387{
7388 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7389 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7390 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7391 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7392 iemFpuMaybePopOne(pFpuCtx);
7393}
7394
7395
7396/**
7397 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7398 *
7399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7400 */
7401IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7402{
7403 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7404 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7405}
7406
7407
7408/**
7409 * Marks the specified stack register as free (for FFREE).
7410 *
7411 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7412 * @param iStReg The register to free.
7413 */
7414IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7415{
7416 Assert(iStReg < 8);
7417 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7418 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7419 pFpuCtx->FTW &= ~RT_BIT(iReg);
7420}
7421
7422
7423/**
7424 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7425 *
7426 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7427 */
7428IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7429{
7430 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7431 uint16_t uFsw = pFpuCtx->FSW;
7432 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7433 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7434 uFsw &= ~X86_FSW_TOP_MASK;
7435 uFsw |= uTop;
7436 pFpuCtx->FSW = uFsw;
7437}
7438
7439
7440/**
7441 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7442 *
7443 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7444 */
7445IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7446{
7447 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7448 uint16_t uFsw = pFpuCtx->FSW;
7449 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7450 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7451 uFsw &= ~X86_FSW_TOP_MASK;
7452 uFsw |= uTop;
7453 pFpuCtx->FSW = uFsw;
7454}
7455
7456
7457/**
7458 * Updates the FSW, FOP, FPUIP, and FPUCS.
7459 *
7460 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7461 * @param u16FSW The FSW from the current instruction.
7462 */
7463IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7464{
7465 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7466 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7467 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7468}
7469
7470
7471/**
7472 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7473 *
7474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7475 * @param u16FSW The FSW from the current instruction.
7476 */
7477IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7478{
7479 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7480 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7481 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7482 iemFpuMaybePopOne(pFpuCtx);
7483}
7484
7485
7486/**
7487 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7488 *
7489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7490 * @param u16FSW The FSW from the current instruction.
7491 * @param iEffSeg The effective memory operand selector register.
7492 * @param GCPtrEff The effective memory operand offset.
7493 */
7494IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7495{
7496 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7497 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7498 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7499 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7500}
7501
7502
7503/**
7504 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7505 *
7506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7507 * @param u16FSW The FSW from the current instruction.
7508 */
7509IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7510{
7511 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7512 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7513 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7514 iemFpuMaybePopOne(pFpuCtx);
7515 iemFpuMaybePopOne(pFpuCtx);
7516}
7517
7518
7519/**
7520 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7521 *
7522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7523 * @param u16FSW The FSW from the current instruction.
7524 * @param iEffSeg The effective memory operand selector register.
7525 * @param GCPtrEff The effective memory operand offset.
7526 */
7527IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7528{
7529 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7530 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7531 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7532 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7533 iemFpuMaybePopOne(pFpuCtx);
7534}
7535
7536
7537/**
7538 * Worker routine for raising an FPU stack underflow exception.
7539 *
7540 * @param pFpuCtx The FPU context.
7541 * @param iStReg The stack register being accessed.
7542 */
7543IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7544{
7545 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7546 if (pFpuCtx->FCW & X86_FCW_IM)
7547 {
7548 /* Masked underflow. */
7549 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7550 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7551 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7552 if (iStReg != UINT8_MAX)
7553 {
7554 pFpuCtx->FTW |= RT_BIT(iReg);
7555 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7556 }
7557 }
7558 else
7559 {
7560 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7561 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7562 }
7563}
7564
7565
7566/**
7567 * Raises a FPU stack underflow exception.
7568 *
7569 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7570 * @param iStReg The destination register that should be loaded
7571 * with QNaN if \#IS is not masked. Specify
7572 * UINT8_MAX if none (like for fcom).
7573 */
7574DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7575{
7576 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7577 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7578 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7579}
7580
7581
7582DECL_NO_INLINE(IEM_STATIC, void)
7583iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7584{
7585 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7586 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7587 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7588 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7589}
7590
7591
7592DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7593{
7594 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7595 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7596 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7597 iemFpuMaybePopOne(pFpuCtx);
7598}
7599
7600
7601DECL_NO_INLINE(IEM_STATIC, void)
7602iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7603{
7604 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7605 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7606 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7607 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7608 iemFpuMaybePopOne(pFpuCtx);
7609}
7610
7611
7612DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7613{
7614 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7615 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7616 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7617 iemFpuMaybePopOne(pFpuCtx);
7618 iemFpuMaybePopOne(pFpuCtx);
7619}
7620
7621
7622DECL_NO_INLINE(IEM_STATIC, void)
7623iemFpuStackPushUnderflow(PVMCPU pVCpu)
7624{
7625 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7626 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7627
7628 if (pFpuCtx->FCW & X86_FCW_IM)
7629 {
7630        /* Masked underflow - Push QNaN. */
7631 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7632 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7633 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7634 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7635 pFpuCtx->FTW |= RT_BIT(iNewTop);
7636 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7637 iemFpuRotateStackPush(pFpuCtx);
7638 }
7639 else
7640 {
7641 /* Exception pending - don't change TOP or the register stack. */
7642 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7643 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7644 }
7645}
7646
7647
7648DECL_NO_INLINE(IEM_STATIC, void)
7649iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7650{
7651 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7652 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7653
7654 if (pFpuCtx->FCW & X86_FCW_IM)
7655 {
7656        /* Masked underflow - Push QNaN. */
7657 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7658 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7659 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7660 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7661 pFpuCtx->FTW |= RT_BIT(iNewTop);
7662 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7663 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7664 iemFpuRotateStackPush(pFpuCtx);
7665 }
7666 else
7667 {
7668 /* Exception pending - don't change TOP or the register stack. */
7669 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7670 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7671 }
7672}
7673
7674
7675/**
7676 * Worker routine for raising an FPU stack overflow exception on a push.
7677 *
7678 * @param pFpuCtx The FPU context.
7679 */
7680IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7681{
7682 if (pFpuCtx->FCW & X86_FCW_IM)
7683 {
7684 /* Masked overflow. */
7685 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7686 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7687 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7688 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7689 pFpuCtx->FTW |= RT_BIT(iNewTop);
7690 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7691 iemFpuRotateStackPush(pFpuCtx);
7692 }
7693 else
7694 {
7695 /* Exception pending - don't change TOP or the register stack. */
7696 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7697 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7698 }
7699}
7700
7701
7702/**
7703 * Raises a FPU stack overflow exception on a push.
7704 *
7705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7706 */
7707DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7708{
7709 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7710 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7711 iemFpuStackPushOverflowOnly(pFpuCtx);
7712}
7713
7714
7715/**
7716 * Raises a FPU stack overflow exception on a push with a memory operand.
7717 *
7718 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7719 * @param iEffSeg The effective memory operand selector register.
7720 * @param GCPtrEff The effective memory operand offset.
7721 */
7722DECL_NO_INLINE(IEM_STATIC, void)
7723iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7724{
7725 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7726 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7727 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7728 iemFpuStackPushOverflowOnly(pFpuCtx);
7729}
7730
7731
7732IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7733{
7734 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7735 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7736 if (pFpuCtx->FTW & RT_BIT(iReg))
7737 return VINF_SUCCESS;
7738 return VERR_NOT_FOUND;
7739}
7740
7741
7742IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7743{
7744 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7745 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7746 if (pFpuCtx->FTW & RT_BIT(iReg))
7747 {
7748 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7749 return VINF_SUCCESS;
7750 }
7751 return VERR_NOT_FOUND;
7752}
7753
7754
7755IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7756 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7757{
7758 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7759 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7760 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7761 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7762 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7763 {
7764 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7765 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7766 return VINF_SUCCESS;
7767 }
7768 return VERR_NOT_FOUND;
7769}
7770
7771
7772IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7773{
7774 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7775 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7776 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7777 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7778 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7779 {
7780 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7781 return VINF_SUCCESS;
7782 }
7783 return VERR_NOT_FOUND;
7784}
7785
7786
7787/**
7788 * Updates the FPU exception status after FCW is changed.
7789 *
7790 * @param pFpuCtx The FPU context.
7791 */
7792IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7793{
7794 uint16_t u16Fsw = pFpuCtx->FSW;
7795 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7796 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7797 else
7798 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7799 pFpuCtx->FSW = u16Fsw;
7800}
7801
7802
7803/**
7804 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7805 *
7806 * @returns The full FTW.
7807 * @param pFpuCtx The FPU context.
7808 */
7809IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7810{
7811 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7812 uint16_t u16Ftw = 0;
7813 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7814 for (unsigned iSt = 0; iSt < 8; iSt++)
7815 {
7816 unsigned const iReg = (iSt + iTop) & 7;
7817 if (!(u8Ftw & RT_BIT(iReg)))
7818 u16Ftw |= 3 << (iReg * 2); /* empty */
7819 else
7820 {
7821 uint16_t uTag;
7822 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7823 if (pr80Reg->s.uExponent == 0x7fff)
7824 uTag = 2; /* Exponent is all 1's => Special. */
7825 else if (pr80Reg->s.uExponent == 0x0000)
7826 {
7827 if (pr80Reg->s.u64Mantissa == 0x0000)
7828 uTag = 1; /* All bits are zero => Zero. */
7829 else
7830 uTag = 2; /* Must be special. */
7831 }
7832 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7833 uTag = 0; /* Valid. */
7834 else
7835 uTag = 2; /* Must be special. */
7836
 7837 u16Ftw |= uTag << (iReg * 2); /* valid (0), zero (1) or special (2) */
7838 }
7839 }
7840
7841 return u16Ftw;
7842}
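
/*
 * Informal sketch (not compiled): the per-register tag classification used by
 * iemFpuCalcFullFtw above, expressed on a bare exponent/mantissa pair.  Tag
 * encoding: 0=valid, 1=zero, 2=special (NaN, infinity, denormal, unnormal),
 * 3=empty.  The helper name is made up for illustration.
 */
#if 0
static unsigned iemFpuSketchTagFor(uint16_t uExponent, uint64_t u64Mantissa)
{
    if (uExponent == 0x7fff)
        return 2;                                   /* all-ones exponent => special */
    if (uExponent == 0)
        return u64Mantissa == 0 ? 1 : 2;            /* zero, else (pseudo-)denormal => special */
    return (u64Mantissa & RT_BIT_64(63)) ? 0 : 2;   /* J bit set => valid, else special */
}
#endif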
7843
7844
7845/**
7846 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7847 *
7848 * @returns The compressed FTW.
7849 * @param u16FullFtw The full FTW to convert.
7850 */
7851IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7852{
7853 uint8_t u8Ftw = 0;
7854 for (unsigned i = 0; i < 8; i++)
7855 {
7856 if ((u16FullFtw & 3) != 3 /*empty*/)
7857 u8Ftw |= RT_BIT(i);
7858 u16FullFtw >>= 2;
7859 }
7860
7861 return u8Ftw;
7862}
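
/*
 * Informal sketch (not compiled): the relationship between the two FTW
 * helpers above.  Compressing the full tag word must reproduce the occupancy
 * bits kept in the context; the concrete value in the comment is just a
 * hypothetical example.
 */
#if 0
static void iemFpuSketchFtwRoundTrip(PCX86FXSTATE pFpuCtx)
{
    uint16_t const u16Full = iemFpuCalcFullFtw(pFpuCtx);
    Assert(iemFpuCompressFtw(u16Full) == (uint8_t)pFpuCtx->FTW);
    /* E.g. a full FTW of 0xff2a (physical regs 4..7 empty, regs 0..2 special,
       reg 3 valid) compresses to 0x0f. */
}
#endif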
7863
7864/** @} */
7865
7866
7867/** @name Memory access.
7868 *
7869 * @{
7870 */
7871
7872
7873/**
7874 * Updates the IEMCPU::cbWritten counter if applicable.
7875 *
7876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7877 * @param fAccess The access being accounted for.
7878 * @param cbMem The access size.
7879 */
7880DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7881{
7882 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7883 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7884 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7885}
7886
7887
7888/**
 7889 * Checks if the given segment can be written to, raising the appropriate
7890 * exception if not.
7891 *
7892 * @returns VBox strict status code.
7893 *
7894 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7895 * @param pHid Pointer to the hidden register.
7896 * @param iSegReg The register number.
7897 * @param pu64BaseAddr Where to return the base address to use for the
7898 * segment. (In 64-bit code it may differ from the
7899 * base in the hidden segment.)
7900 */
7901IEM_STATIC VBOXSTRICTRC
7902iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7903{
7904 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7905
7906 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7907 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7908 else
7909 {
7910 if (!pHid->Attr.n.u1Present)
7911 {
7912 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7913 AssertRelease(uSel == 0);
7914 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7915 return iemRaiseGeneralProtectionFault0(pVCpu);
7916 }
7917
7918 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7919 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7920 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7921 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7922 *pu64BaseAddr = pHid->u64Base;
7923 }
7924 return VINF_SUCCESS;
7925}
7926
7927
7928/**
 7929 * Checks if the given segment can be read from, raising the appropriate
7930 * exception if not.
7931 *
7932 * @returns VBox strict status code.
7933 *
7934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7935 * @param pHid Pointer to the hidden register.
7936 * @param iSegReg The register number.
7937 * @param pu64BaseAddr Where to return the base address to use for the
7938 * segment. (In 64-bit code it may differ from the
7939 * base in the hidden segment.)
7940 */
7941IEM_STATIC VBOXSTRICTRC
7942iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7943{
7944 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7945
7946 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7947 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7948 else
7949 {
7950 if (!pHid->Attr.n.u1Present)
7951 {
7952 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7953 AssertRelease(uSel == 0);
7954 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7955 return iemRaiseGeneralProtectionFault0(pVCpu);
7956 }
7957
7958 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7959 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7960 *pu64BaseAddr = pHid->u64Base;
7961 }
7962 return VINF_SUCCESS;
7963}
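
/*
 * Informal sketch (not compiled): the decision made by the two access checks
 * above, reduced to booleans.  In 64-bit mode only FS/GS contribute a base
 * and no attribute check is done; otherwise a non-present segment faults and
 * a write requires a writable data segment.  All names are local to the
 * sketch.
 */
#if 0
static bool iemSketchSegWritable(bool fLongMode, bool fPresent, bool fCode, bool fWritable)
{
    if (fLongMode)
        return true;                    /* no attribute checks in 64-bit mode */
    if (!fPresent)
        return false;                   /* -> #GP(0) */
    return !fCode && fWritable;         /* code or read-only data -> invalid access */
}
#endif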
7964
7965
7966/**
7967 * Applies the segment limit, base and attributes.
7968 *
7969 * This may raise a \#GP or \#SS.
7970 *
7971 * @returns VBox strict status code.
7972 *
7973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7974 * @param fAccess The kind of access which is being performed.
7975 * @param iSegReg The index of the segment register to apply.
7976 * This is UINT8_MAX if none (for IDT, GDT, LDT,
7977 * TSS, ++).
7978 * @param cbMem The access size.
7979 * @param pGCPtrMem Pointer to the guest memory address to apply
7980 * segmentation to. Input and output parameter.
7981 */
7982IEM_STATIC VBOXSTRICTRC
7983iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
7984{
7985 if (iSegReg == UINT8_MAX)
7986 return VINF_SUCCESS;
7987
7988 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7989 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
7990 switch (pVCpu->iem.s.enmCpuMode)
7991 {
7992 case IEMMODE_16BIT:
7993 case IEMMODE_32BIT:
7994 {
7995 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
7996 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
7997
7998 if ( pSel->Attr.n.u1Present
7999 && !pSel->Attr.n.u1Unusable)
8000 {
8001 Assert(pSel->Attr.n.u1DescType);
8002 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8003 {
8004 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8005 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8006 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8007
8008 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8009 {
8010 /** @todo CPL check. */
8011 }
8012
8013 /*
8014 * There are two kinds of data selectors, normal and expand down.
8015 */
8016 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8017 {
8018 if ( GCPtrFirst32 > pSel->u32Limit
8019 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8020 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8021 }
8022 else
8023 {
8024 /*
8025 * The upper boundary is defined by the B bit, not the G bit!
8026 */
8027 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8028 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8029 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8030 }
8031 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8032 }
8033 else
8034 {
8035
8036 /*
 8037 * Code selectors can usually be used to read through; writing is
8038 * only permitted in real and V8086 mode.
8039 */
8040 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8041 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8042 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8043 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8044 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8045
8046 if ( GCPtrFirst32 > pSel->u32Limit
8047 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8048 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8049
8050 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8051 {
8052 /** @todo CPL check. */
8053 }
8054
8055 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8056 }
8057 }
8058 else
8059 return iemRaiseGeneralProtectionFault0(pVCpu);
8060 return VINF_SUCCESS;
8061 }
8062
8063 case IEMMODE_64BIT:
8064 {
8065 RTGCPTR GCPtrMem = *pGCPtrMem;
8066 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8067 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8068
8069 Assert(cbMem >= 1);
8070 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8071 return VINF_SUCCESS;
8072 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8073 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8074 return iemRaiseGeneralProtectionFault0(pVCpu);
8075 }
8076
8077 default:
8078 AssertFailedReturn(VERR_IEM_IPE_7);
8079 }
8080}
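
/*
 * Informal sketch (not compiled): the expand-down range check performed
 * above, pulled out as a 32-bit predicate.  For an expand-down data segment
 * the valid offsets are limit+1 up to 0xffffffff (B=1) or 0xffff (B=0);
 * anything at or below the limit faults.  All names are local to the sketch.
 */
#if 0
static bool iemSketchExpandDownOk(uint32_t offFirst, uint32_t offLast, uint32_t uLimit, bool fBigSeg)
{
    uint32_t const offMax = fBigSeg ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst > uLimit && offLast <= offMax;
    /* e.g. limit=0x0fff, B=1: a 4 byte access at 0x1000 passes,
       the same access at 0x0ffe is rejected. */
}
#endif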
8081
8082
8083/**
 8084 * Translates a virtual address to a physical address and checks if we
8085 * can access the page as specified.
8086 *
8087 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8088 * @param GCPtrMem The virtual address.
8089 * @param fAccess The intended access.
8090 * @param pGCPhysMem Where to return the physical address.
8091 */
8092IEM_STATIC VBOXSTRICTRC
8093iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8094{
8095 /** @todo Need a different PGM interface here. We're currently using
8096 * generic / REM interfaces. this won't cut it for R0 & RC. */
8097 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8098 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8099 RTGCPHYS GCPhys;
8100 uint64_t fFlags;
8101 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8102 if (RT_FAILURE(rc))
8103 {
8104 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8105 /** @todo Check unassigned memory in unpaged mode. */
8106 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8107 *pGCPhysMem = NIL_RTGCPHYS;
8108 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8109 }
8110
8111 /* If the page is writable and does not have the no-exec bit set, all
8112 access is allowed. Otherwise we'll have to check more carefully... */
8113 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8114 {
8115 /* Write to read only memory? */
8116 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8117 && !(fFlags & X86_PTE_RW)
8118 && ( (pVCpu->iem.s.uCpl == 3
8119 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8120 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8121 {
8122 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8123 *pGCPhysMem = NIL_RTGCPHYS;
8124 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8125 }
8126
8127 /* Kernel memory accessed by userland? */
8128 if ( !(fFlags & X86_PTE_US)
8129 && pVCpu->iem.s.uCpl == 3
8130 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8131 {
8132 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8133 *pGCPhysMem = NIL_RTGCPHYS;
8134 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8135 }
8136
8137 /* Executing non-executable memory? */
8138 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8139 && (fFlags & X86_PTE_PAE_NX)
8140 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8141 {
8142 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8143 *pGCPhysMem = NIL_RTGCPHYS;
8144 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8145 VERR_ACCESS_DENIED);
8146 }
8147 }
8148
8149 /*
8150 * Set the dirty / access flags.
 8151 * ASSUMES this is set when the address is translated rather than on commit...
8152 */
8153 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8154 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8155 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8156 {
8157 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8158 AssertRC(rc2);
8159 }
8160
8161 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8162 *pGCPhysMem = GCPhys;
8163 return VINF_SUCCESS;
8164}
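
/*
 * Informal sketch (not compiled): the page permission checks above, reduced
 * to their essentials.  Here fUser means "CPL 3 and not an access to a system
 * structure", fWrite/fExec describe the access, fPteRw/fPteUs/fPteNx the PTE
 * bits, fWp CR0.WP and fNxe EFER.NXE; all names are local to this sketch.
 */
#if 0
static bool iemSketchPagePermOk(bool fUser, bool fWrite, bool fExec,
                                bool fPteRw, bool fPteUs, bool fPteNx, bool fWp, bool fNxe)
{
    if (fWrite && !fPteRw && (fUser || fWp))
        return false;                   /* write to a read-only page */
    if (fUser && !fPteUs)
        return false;                   /* user-mode access to a supervisor page */
    if (fExec && fPteNx && fNxe)
        return false;                   /* instruction fetch from a no-execute page */
    return true;
}
#endif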
8165
8166
8167
8168/**
8169 * Maps a physical page.
8170 *
8171 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8173 * @param GCPhysMem The physical address.
8174 * @param fAccess The intended access.
8175 * @param ppvMem Where to return the mapping address.
8176 * @param pLock The PGM lock.
8177 */
8178IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8179{
8180#ifdef IEM_LOG_MEMORY_WRITES
8181 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8182 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8183#endif
8184
 8185 /** @todo This API may require some improvement later. A private deal with PGM
 8186 * regarding locking and unlocking needs to be struck. A couple of TLBs
8187 * living in PGM, but with publicly accessible inlined access methods
8188 * could perhaps be an even better solution. */
8189 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8190 GCPhysMem,
8191 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8192 pVCpu->iem.s.fBypassHandlers,
8193 ppvMem,
8194 pLock);
8195 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8196 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8197
8198 return rc;
8199}
8200
8201
8202/**
8203 * Unmap a page previously mapped by iemMemPageMap.
8204 *
8205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8206 * @param GCPhysMem The physical address.
8207 * @param fAccess The intended access.
8208 * @param pvMem What iemMemPageMap returned.
8209 * @param pLock The PGM lock.
8210 */
8211DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8212{
8213 NOREF(pVCpu);
8214 NOREF(GCPhysMem);
8215 NOREF(fAccess);
8216 NOREF(pvMem);
8217 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8218}
8219
8220
8221/**
8222 * Looks up a memory mapping entry.
8223 *
 8224 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8225 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8226 * @param pvMem The memory address.
 8227 * @param fAccess The kind of access to look up.
8228 */
8229DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8230{
8231 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8232 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8233 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8234 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8235 return 0;
8236 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8237 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8238 return 1;
8239 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8240 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8241 return 2;
8242 return VERR_NOT_FOUND;
8243}
8244
8245
8246/**
8247 * Finds a free memmap entry when using iNextMapping doesn't work.
8248 *
8249 * @returns Memory mapping index, 1024 on failure.
8250 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8251 */
8252IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8253{
8254 /*
8255 * The easy case.
8256 */
8257 if (pVCpu->iem.s.cActiveMappings == 0)
8258 {
8259 pVCpu->iem.s.iNextMapping = 1;
8260 return 0;
8261 }
8262
8263 /* There should be enough mappings for all instructions. */
8264 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8265
8266 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8267 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8268 return i;
8269
8270 AssertFailedReturn(1024);
8271}
8272
8273
8274/**
8275 * Commits a bounce buffer that needs writing back and unmaps it.
8276 *
8277 * @returns Strict VBox status code.
8278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8279 * @param iMemMap The index of the buffer to commit.
 8280 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8281 * Always false in ring-3, obviously.
8282 */
8283IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8284{
8285 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8286 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8287#ifdef IN_RING3
8288 Assert(!fPostponeFail);
8289 RT_NOREF_PV(fPostponeFail);
8290#endif
8291
8292 /*
8293 * Do the writing.
8294 */
8295 PVM pVM = pVCpu->CTX_SUFF(pVM);
8296 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8297 {
8298 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8299 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8300 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8301 if (!pVCpu->iem.s.fBypassHandlers)
8302 {
8303 /*
8304 * Carefully and efficiently dealing with access handler return
 8305 * codes makes this a little bloated.
8306 */
8307 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8308 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8309 pbBuf,
8310 cbFirst,
8311 PGMACCESSORIGIN_IEM);
8312 if (rcStrict == VINF_SUCCESS)
8313 {
8314 if (cbSecond)
8315 {
8316 rcStrict = PGMPhysWrite(pVM,
8317 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8318 pbBuf + cbFirst,
8319 cbSecond,
8320 PGMACCESSORIGIN_IEM);
8321 if (rcStrict == VINF_SUCCESS)
8322 { /* nothing */ }
8323 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8324 {
8325 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8326 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8327 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8328 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8329 }
8330#ifndef IN_RING3
8331 else if (fPostponeFail)
8332 {
8333 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8334 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8335 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8336 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8337 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8338 return iemSetPassUpStatus(pVCpu, rcStrict);
8339 }
8340#endif
8341 else
8342 {
8343 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8344 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8345 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8346 return rcStrict;
8347 }
8348 }
8349 }
8350 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8351 {
8352 if (!cbSecond)
8353 {
8354 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8355 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8356 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8357 }
8358 else
8359 {
8360 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8361 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8362 pbBuf + cbFirst,
8363 cbSecond,
8364 PGMACCESSORIGIN_IEM);
8365 if (rcStrict2 == VINF_SUCCESS)
8366 {
8367 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8368 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8369 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8370 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8371 }
8372 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8373 {
8374 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8375 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8376 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8377 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8378 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8379 }
8380#ifndef IN_RING3
8381 else if (fPostponeFail)
8382 {
8383 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8384 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8385 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8386 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8387 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8388 return iemSetPassUpStatus(pVCpu, rcStrict);
8389 }
8390#endif
8391 else
8392 {
8393 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8394 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8395 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8396 return rcStrict2;
8397 }
8398 }
8399 }
8400#ifndef IN_RING3
8401 else if (fPostponeFail)
8402 {
8403 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8404 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8405 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8406 if (!cbSecond)
8407 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8408 else
8409 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8410 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8411 return iemSetPassUpStatus(pVCpu, rcStrict);
8412 }
8413#endif
8414 else
8415 {
8416 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8417 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8418 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8419 return rcStrict;
8420 }
8421 }
8422 else
8423 {
8424 /*
8425 * No access handlers, much simpler.
8426 */
8427 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8428 if (RT_SUCCESS(rc))
8429 {
8430 if (cbSecond)
8431 {
8432 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8433 if (RT_SUCCESS(rc))
8434 { /* likely */ }
8435 else
8436 {
8437 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8438 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8439 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8440 return rc;
8441 }
8442 }
8443 }
8444 else
8445 {
8446 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8447 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8448 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8449 return rc;
8450 }
8451 }
8452 }
8453
8454#if defined(IEM_LOG_MEMORY_WRITES)
8455 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8456 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8457 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8458 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8459 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8460 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8461
8462 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8463 g_cbIemWrote = cbWrote;
8464 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8465#endif
8466
8467 /*
8468 * Free the mapping entry.
8469 */
8470 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8471 Assert(pVCpu->iem.s.cActiveMappings != 0);
8472 pVCpu->iem.s.cActiveMappings--;
8473 return VINF_SUCCESS;
8474}
8475
8476
8477/**
8478 * iemMemMap worker that deals with a request crossing pages.
8479 */
8480IEM_STATIC VBOXSTRICTRC
8481iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8482{
8483 /*
8484 * Do the address translations.
8485 */
8486 RTGCPHYS GCPhysFirst;
8487 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8488 if (rcStrict != VINF_SUCCESS)
8489 return rcStrict;
8490
8491 RTGCPHYS GCPhysSecond;
8492 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8493 fAccess, &GCPhysSecond);
8494 if (rcStrict != VINF_SUCCESS)
8495 return rcStrict;
8496 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8497
8498 PVM pVM = pVCpu->CTX_SUFF(pVM);
8499
8500 /*
8501 * Read in the current memory content if it's a read, execute or partial
8502 * write access.
8503 */
8504 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8505 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8506 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8507
8508 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8509 {
8510 if (!pVCpu->iem.s.fBypassHandlers)
8511 {
8512 /*
8513 * Must carefully deal with access handler status codes here,
 8514 * which makes the code a bit bloated.
8515 */
8516 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8517 if (rcStrict == VINF_SUCCESS)
8518 {
8519 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8520 if (rcStrict == VINF_SUCCESS)
8521 { /*likely */ }
8522 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8523 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8524 else
8525 {
8526 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8527 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8528 return rcStrict;
8529 }
8530 }
8531 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8532 {
8533 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8534 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8535 {
8536 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8537 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8538 }
8539 else
8540 {
8541 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8542 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
8543 return rcStrict2;
8544 }
8545 }
8546 else
8547 {
8548 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8549 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8550 return rcStrict;
8551 }
8552 }
8553 else
8554 {
8555 /*
 8556 * No informational status codes here, much more straightforward.
8557 */
8558 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8559 if (RT_SUCCESS(rc))
8560 {
8561 Assert(rc == VINF_SUCCESS);
8562 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8563 if (RT_SUCCESS(rc))
8564 Assert(rc == VINF_SUCCESS);
8565 else
8566 {
8567 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8568 return rc;
8569 }
8570 }
8571 else
8572 {
8573 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8574 return rc;
8575 }
8576 }
8577 }
8578#ifdef VBOX_STRICT
8579 else
8580 memset(pbBuf, 0xcc, cbMem);
8581 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8582 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8583#endif
8584
8585 /*
8586 * Commit the bounce buffer entry.
8587 */
8588 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8589 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8590 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8591 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8592 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8593 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8594 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8595 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8596 pVCpu->iem.s.cActiveMappings++;
8597
8598 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8599 *ppvMem = pbBuf;
8600 return VINF_SUCCESS;
8601}
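
/*
 * Informal sketch (not compiled): how a straddling access is split across the
 * two pages above.  Assumes the usual 4 KiB page size; the helper name is
 * made up for the sketch.
 */
#if 0
static void iemSketchSplitAccess(RTGCPTR GCPtrFirst, size_t cbMem,
                                 size_t *pcbFirstPage, size_t *pcbSecondPage)
{
    size_t const cbFirst = PAGE_SIZE - (GCPtrFirst & PAGE_OFFSET_MASK);
    *pcbFirstPage  = cbFirst;           /* bytes that still fit on the first page */
    *pcbSecondPage = cbMem - cbFirst;   /* remainder, starts at offset 0 of the next page */
    /* e.g. an 8 byte access at an offset of 0xffa into a page gives 6 bytes
       on the first page and 2 on the second. */
}
#endif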
8602
8603
8604/**
 8605 * iemMemMap worker that deals with iemMemPageMap failures.
8606 */
8607IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8608 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8609{
8610 /*
8611 * Filter out conditions we can handle and the ones which shouldn't happen.
8612 */
8613 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8614 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8615 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8616 {
8617 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8618 return rcMap;
8619 }
8620 pVCpu->iem.s.cPotentialExits++;
8621
8622 /*
8623 * Read in the current memory content if it's a read, execute or partial
8624 * write access.
8625 */
8626 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8627 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8628 {
8629 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8630 memset(pbBuf, 0xff, cbMem);
8631 else
8632 {
8633 int rc;
8634 if (!pVCpu->iem.s.fBypassHandlers)
8635 {
8636 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8637 if (rcStrict == VINF_SUCCESS)
8638 { /* nothing */ }
8639 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8640 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8641 else
8642 {
8643 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8644 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8645 return rcStrict;
8646 }
8647 }
8648 else
8649 {
8650 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8651 if (RT_SUCCESS(rc))
8652 { /* likely */ }
8653 else
8654 {
8655 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8656 GCPhysFirst, rc));
8657 return rc;
8658 }
8659 }
8660 }
8661 }
8662#ifdef VBOX_STRICT
8663 else
8664 memset(pbBuf, 0xcc, cbMem);
8665#endif
8666#ifdef VBOX_STRICT
8667 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8668 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8669#endif
8670
8671 /*
8672 * Commit the bounce buffer entry.
8673 */
8674 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8675 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8676 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8677 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8678 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8679 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8680 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8681 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8682 pVCpu->iem.s.cActiveMappings++;
8683
8684 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8685 *ppvMem = pbBuf;
8686 return VINF_SUCCESS;
8687}
8688
8689
8690
8691/**
8692 * Maps the specified guest memory for the given kind of access.
8693 *
 8694 * This may use bounce buffering of the memory if it crosses a page
8695 * boundary or if there is an access handler installed for any of it. Because
8696 * of lock prefix guarantees, we're in for some extra clutter when this
8697 * happens.
8698 *
8699 * This may raise a \#GP, \#SS, \#PF or \#AC.
8700 *
8701 * @returns VBox strict status code.
8702 *
8703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8704 * @param ppvMem Where to return the pointer to the mapped
8705 * memory.
8706 * @param cbMem The number of bytes to map. This is usually 1,
8707 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8708 * string operations it can be up to a page.
8709 * @param iSegReg The index of the segment register to use for
8710 * this access. The base and limits are checked.
8711 * Use UINT8_MAX to indicate that no segmentation
8712 * is required (for IDT, GDT and LDT accesses).
8713 * @param GCPtrMem The address of the guest memory.
8714 * @param fAccess How the memory is being accessed. The
8715 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8716 * how to map the memory, while the
8717 * IEM_ACCESS_WHAT_XXX bit is used when raising
8718 * exceptions.
8719 */
8720IEM_STATIC VBOXSTRICTRC
8721iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8722{
8723 /*
8724 * Check the input and figure out which mapping entry to use.
8725 */
8726 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8727 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8728 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8729
8730 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8731 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8732 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8733 {
8734 iMemMap = iemMemMapFindFree(pVCpu);
8735 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8736 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8737 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8738 pVCpu->iem.s.aMemMappings[2].fAccess),
8739 VERR_IEM_IPE_9);
8740 }
8741
8742 /*
8743 * Map the memory, checking that we can actually access it. If something
8744 * slightly complicated happens, fall back on bounce buffering.
8745 */
8746 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8747 if (rcStrict != VINF_SUCCESS)
8748 return rcStrict;
8749
8750 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8751 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8752
8753 RTGCPHYS GCPhysFirst;
8754 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8755 if (rcStrict != VINF_SUCCESS)
8756 return rcStrict;
8757
8758 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8759 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8760 if (fAccess & IEM_ACCESS_TYPE_READ)
8761 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8762
8763 void *pvMem;
8764 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8765 if (rcStrict != VINF_SUCCESS)
8766 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8767
8768 /*
8769 * Fill in the mapping table entry.
8770 */
8771 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8772 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8773 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8774 pVCpu->iem.s.cActiveMappings++;
8775
8776 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8777 *ppvMem = pvMem;
8778 return VINF_SUCCESS;
8779}
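
/*
 * Informal sketch (not compiled) of the usual calling pattern: map, touch the
 * memory, then commit & unmap with the same access flags.  The real thing
 * (with proper status handling) is what the fetch/store helpers further down
 * do; the helper name here is made up.
 */
#if 0
static VBOXSTRICTRC iemSketchStoreU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
{
    uint32_t *pu32Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
                                      iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;            /* bounce buffered or direct, the caller can't tell */
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
    }
    return rcStrict;
}
#endif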
8780
8781
8782/**
8783 * Commits the guest memory if bounce buffered and unmaps it.
8784 *
8785 * @returns Strict VBox status code.
8786 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8787 * @param pvMem The mapping.
8788 * @param fAccess The kind of access.
8789 */
8790IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8791{
8792 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8793 AssertReturn(iMemMap >= 0, iMemMap);
8794
8795 /* If it's bounce buffered, we may need to write back the buffer. */
8796 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8797 {
8798 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8799 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8800 }
8801 /* Otherwise unlock it. */
8802 else
8803 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8804
8805 /* Free the entry. */
8806 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8807 Assert(pVCpu->iem.s.cActiveMappings != 0);
8808 pVCpu->iem.s.cActiveMappings--;
8809 return VINF_SUCCESS;
8810}
8811
8812#ifdef IEM_WITH_SETJMP
8813
8814/**
8815 * Maps the specified guest memory for the given kind of access, longjmp on
8816 * error.
8817 *
 8818 * This may use bounce buffering of the memory if it crosses a page
8819 * boundary or if there is an access handler installed for any of it. Because
8820 * of lock prefix guarantees, we're in for some extra clutter when this
8821 * happens.
8822 *
8823 * This may raise a \#GP, \#SS, \#PF or \#AC.
8824 *
8825 * @returns Pointer to the mapped memory.
8826 *
8827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8828 * @param cbMem The number of bytes to map. This is usually 1,
8829 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8830 * string operations it can be up to a page.
8831 * @param iSegReg The index of the segment register to use for
8832 * this access. The base and limits are checked.
8833 * Use UINT8_MAX to indicate that no segmentation
8834 * is required (for IDT, GDT and LDT accesses).
8835 * @param GCPtrMem The address of the guest memory.
8836 * @param fAccess How the memory is being accessed. The
8837 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8838 * how to map the memory, while the
8839 * IEM_ACCESS_WHAT_XXX bit is used when raising
8840 * exceptions.
8841 */
8842IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8843{
8844 /*
8845 * Check the input and figure out which mapping entry to use.
8846 */
8847 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8848 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8849 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8850
8851 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8852 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8853 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8854 {
8855 iMemMap = iemMemMapFindFree(pVCpu);
8856 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8857 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8858 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8859 pVCpu->iem.s.aMemMappings[2].fAccess),
8860 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8861 }
8862
8863 /*
8864 * Map the memory, checking that we can actually access it. If something
8865 * slightly complicated happens, fall back on bounce buffering.
8866 */
8867 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8868 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8869 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8870
8871 /* Crossing a page boundary? */
8872 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8873 { /* No (likely). */ }
8874 else
8875 {
8876 void *pvMem;
8877 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8878 if (rcStrict == VINF_SUCCESS)
8879 return pvMem;
8880 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8881 }
8882
8883 RTGCPHYS GCPhysFirst;
8884 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8885 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8886 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8887
8888 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8889 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8890 if (fAccess & IEM_ACCESS_TYPE_READ)
8891 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8892
8893 void *pvMem;
8894 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8895 if (rcStrict == VINF_SUCCESS)
8896 { /* likely */ }
8897 else
8898 {
8899 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8900 if (rcStrict == VINF_SUCCESS)
8901 return pvMem;
8902 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8903 }
8904
8905 /*
8906 * Fill in the mapping table entry.
8907 */
8908 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8909 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8910 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8911 pVCpu->iem.s.cActiveMappings++;
8912
8913 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8914 return pvMem;
8915}
8916
8917
8918/**
8919 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8920 *
8921 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8922 * @param pvMem The mapping.
8923 * @param fAccess The kind of access.
8924 */
8925IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8926{
8927 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8928 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8929
8930 /* If it's bounce buffered, we may need to write back the buffer. */
8931 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8932 {
8933 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8934 {
8935 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8936 if (rcStrict == VINF_SUCCESS)
8937 return;
8938 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8939 }
8940 }
8941 /* Otherwise unlock it. */
8942 else
8943 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8944
8945 /* Free the entry. */
8946 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8947 Assert(pVCpu->iem.s.cActiveMappings != 0);
8948 pVCpu->iem.s.cActiveMappings--;
8949}
8950
8951#endif /* IEM_WITH_SETJMP */
8952
8953#ifndef IN_RING3
8954/**
 8955 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
 8956 * buffer part shows trouble, the write is postponed to ring-3 (VMCPU_FF_IEM is set).
8957 *
8958 * Allows the instruction to be completed and retired, while the IEM user will
8959 * return to ring-3 immediately afterwards and do the postponed writes there.
8960 *
8961 * @returns VBox status code (no strict statuses). Caller must check
8962 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
8963 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8964 * @param pvMem The mapping.
8965 * @param fAccess The kind of access.
8966 */
8967IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8968{
8969 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8970 AssertReturn(iMemMap >= 0, iMemMap);
8971
8972 /* If it's bounce buffered, we may need to write back the buffer. */
8973 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8974 {
8975 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8976 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
8977 }
8978 /* Otherwise unlock it. */
8979 else
8980 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8981
8982 /* Free the entry. */
8983 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8984 Assert(pVCpu->iem.s.cActiveMappings != 0);
8985 pVCpu->iem.s.cActiveMappings--;
8986 return VINF_SUCCESS;
8987}
8988#endif
8989
8990
8991/**
 8992 * Rolls back mappings, releasing page locks and such.
8993 *
8994 * The caller shall only call this after checking cActiveMappings.
8995 *
8997 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8998 */
8999IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9000{
9001 Assert(pVCpu->iem.s.cActiveMappings > 0);
9002
9003 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9004 while (iMemMap-- > 0)
9005 {
9006 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9007 if (fAccess != IEM_ACCESS_INVALID)
9008 {
9009 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9010 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9011 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9012 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9013 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9014 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9015 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9016 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9017 pVCpu->iem.s.cActiveMappings--;
9018 }
9019 }
9020}
9021
9022
9023/**
9024 * Fetches a data byte.
9025 *
9026 * @returns Strict VBox status code.
9027 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9028 * @param pu8Dst Where to return the byte.
9029 * @param iSegReg The index of the segment register to use for
9030 * this access. The base and limits are checked.
9031 * @param GCPtrMem The address of the guest memory.
9032 */
9033IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9034{
9035 /* The lazy approach for now... */
9036 uint8_t const *pu8Src;
9037 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9038 if (rc == VINF_SUCCESS)
9039 {
9040 *pu8Dst = *pu8Src;
9041 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9042 }
9043 return rc;
9044}
9045
9046
9047#ifdef IEM_WITH_SETJMP
9048/**
9049 * Fetches a data byte, longjmp on error.
9050 *
9051 * @returns The byte.
9052 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9053 * @param iSegReg The index of the segment register to use for
9054 * this access. The base and limits are checked.
9055 * @param GCPtrMem The address of the guest memory.
9056 */
9057DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9058{
9059 /* The lazy approach for now... */
9060 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9061 uint8_t const bRet = *pu8Src;
9062 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9063 return bRet;
9064}
9065#endif /* IEM_WITH_SETJMP */
9066
9067
9068/**
9069 * Fetches a data word.
9070 *
9071 * @returns Strict VBox status code.
9072 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9073 * @param pu16Dst Where to return the word.
9074 * @param iSegReg The index of the segment register to use for
9075 * this access. The base and limits are checked.
9076 * @param GCPtrMem The address of the guest memory.
9077 */
9078IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9079{
9080 /* The lazy approach for now... */
9081 uint16_t const *pu16Src;
9082 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9083 if (rc == VINF_SUCCESS)
9084 {
9085 *pu16Dst = *pu16Src;
9086 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9087 }
9088 return rc;
9089}
9090
9091
9092#ifdef IEM_WITH_SETJMP
9093/**
9094 * Fetches a data word, longjmp on error.
9095 *
9096 * @returns The word
9097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9098 * @param iSegReg The index of the segment register to use for
9099 * this access. The base and limits are checked.
9100 * @param GCPtrMem The address of the guest memory.
9101 */
9102DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9103{
9104 /* The lazy approach for now... */
9105 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9106 uint16_t const u16Ret = *pu16Src;
9107 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9108 return u16Ret;
9109}
9110#endif
9111
9112
9113/**
9114 * Fetches a data dword.
9115 *
9116 * @returns Strict VBox status code.
9117 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9118 * @param pu32Dst Where to return the dword.
9119 * @param iSegReg The index of the segment register to use for
9120 * this access. The base and limits are checked.
9121 * @param GCPtrMem The address of the guest memory.
9122 */
9123IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9124{
9125 /* The lazy approach for now... */
9126 uint32_t const *pu32Src;
9127 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9128 if (rc == VINF_SUCCESS)
9129 {
9130 *pu32Dst = *pu32Src;
9131 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9132 }
9133 return rc;
9134}
9135
9136
9137#ifdef IEM_WITH_SETJMP
9138
9139IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9140{
9141 Assert(cbMem >= 1);
9142 Assert(iSegReg < X86_SREG_COUNT);
9143
9144 /*
9145 * 64-bit mode is simpler.
9146 */
9147 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9148 {
9149 if (iSegReg >= X86_SREG_FS)
9150 {
9151 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9152 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9153 GCPtrMem += pSel->u64Base;
9154 }
9155
9156 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9157 return GCPtrMem;
9158 }
9159 /*
9160 * 16-bit and 32-bit segmentation.
9161 */
9162 else
9163 {
9164 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9165 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9166 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9167 == X86DESCATTR_P /* data, expand up */
9168 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9169 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9170 {
9171 /* expand up */
9172 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9173 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9174 && GCPtrLast32 > (uint32_t)GCPtrMem))
9175 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9176 }
9177 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9178 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9179 {
9180 /* expand down */
9181 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9182 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9183 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9184 && GCPtrLast32 > (uint32_t)GCPtrMem))
9185 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9186 }
9187 else
9188 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9189 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9190 }
9191 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9192}
9193
9194
9195IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9196{
9197 Assert(cbMem >= 1);
9198 Assert(iSegReg < X86_SREG_COUNT);
9199
9200 /*
9201 * 64-bit mode is simpler.
9202 */
9203 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9204 {
9205 if (iSegReg >= X86_SREG_FS)
9206 {
9207 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9208 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9209 GCPtrMem += pSel->u64Base;
9210 }
9211
9212 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9213 return GCPtrMem;
9214 }
9215 /*
9216 * 16-bit and 32-bit segmentation.
9217 */
9218 else
9219 {
9220 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9221 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9222 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9223 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9224 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9225 {
9226 /* expand up */
9227 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9228 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9229 && GCPtrLast32 > (uint32_t)GCPtrMem))
9230 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9231 }
 9232 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9233 {
9234 /* expand down */
9235 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9236 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9237 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9238 && GCPtrLast32 > (uint32_t)GCPtrMem))
9239 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9240 }
9241 else
9242 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9243 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9244 }
9245 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9246}
9247
9248
9249/**
9250 * Fetches a data dword, longjmp on error, fallback/safe version.
9251 *
9252 * @returns The dword
9253 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9254 * @param iSegReg The index of the segment register to use for
9255 * this access. The base and limits are checked.
9256 * @param GCPtrMem The address of the guest memory.
9257 */
9258IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9259{
9260 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9261 uint32_t const u32Ret = *pu32Src;
9262 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9263 return u32Ret;
9264}
9265
9266
9267/**
9268 * Fetches a data dword, longjmp on error.
9269 *
9270 * @returns The dword
9271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9272 * @param iSegReg The index of the segment register to use for
9273 * this access. The base and limits are checked.
9274 * @param GCPtrMem The address of the guest memory.
9275 */
9276DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9277{
9278# ifdef IEM_WITH_DATA_TLB
9279 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9280 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9281 {
9282 /// @todo more later.
9283 }
9284
9285 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9286# else
9287 /* The lazy approach. */
9288 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9289 uint32_t const u32Ret = *pu32Src;
9290 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9291 return u32Ret;
9292# endif
9293}
9294#endif
9295
9296
9297#ifdef SOME_UNUSED_FUNCTION
9298/**
9299 * Fetches a data dword and sign extends it to a qword.
9300 *
9301 * @returns Strict VBox status code.
9302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9303 * @param pu64Dst Where to return the sign extended value.
9304 * @param iSegReg The index of the segment register to use for
9305 * this access. The base and limits are checked.
9306 * @param GCPtrMem The address of the guest memory.
9307 */
9308IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9309{
9310 /* The lazy approach for now... */
9311 int32_t const *pi32Src;
9312 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9313 if (rc == VINF_SUCCESS)
9314 {
9315 *pu64Dst = *pi32Src;
9316 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9317 }
9318#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9319 else
9320 *pu64Dst = 0;
9321#endif
9322 return rc;
9323}
9324#endif
9325
9326
9327/**
9328 * Fetches a data qword.
9329 *
9330 * @returns Strict VBox status code.
9331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9332 * @param pu64Dst Where to return the qword.
9333 * @param iSegReg The index of the segment register to use for
9334 * this access. The base and limits are checked.
9335 * @param GCPtrMem The address of the guest memory.
9336 */
9337IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9338{
9339 /* The lazy approach for now... */
9340 uint64_t const *pu64Src;
9341 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9342 if (rc == VINF_SUCCESS)
9343 {
9344 *pu64Dst = *pu64Src;
9345 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9346 }
9347 return rc;
9348}
9349
9350
9351#ifdef IEM_WITH_SETJMP
9352/**
9353 * Fetches a data qword, longjmp on error.
9354 *
9355 * @returns The qword.
9356 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9357 * @param iSegReg The index of the segment register to use for
9358 * this access. The base and limits are checked.
9359 * @param GCPtrMem The address of the guest memory.
9360 */
9361DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9362{
9363 /* The lazy approach for now... */
9364 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9365 uint64_t const u64Ret = *pu64Src;
9366 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9367 return u64Ret;
9368}
9369#endif
9370
9371
9372/**
9373 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9374 *
9375 * @returns Strict VBox status code.
9376 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9377 * @param pu64Dst Where to return the qword.
9378 * @param iSegReg The index of the segment register to use for
9379 * this access. The base and limits are checked.
9380 * @param GCPtrMem The address of the guest memory.
9381 */
9382IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9383{
9384 /* The lazy approach for now... */
9385 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9386 if (RT_UNLIKELY(GCPtrMem & 15))
9387 return iemRaiseGeneralProtectionFault0(pVCpu);
9388
9389 uint64_t const *pu64Src;
9390 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9391 if (rc == VINF_SUCCESS)
9392 {
9393 *pu64Dst = *pu64Src;
9394 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9395 }
9396 return rc;
9397}
9398
9399
9400#ifdef IEM_WITH_SETJMP
9401/**
9402 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9403 *
9404 * @returns The qword.
9405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9406 * @param iSegReg The index of the segment register to use for
9407 * this access. The base and limits are checked.
9408 * @param GCPtrMem The address of the guest memory.
9409 */
9410DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9411{
9412 /* The lazy approach for now... */
9413 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9414 if (RT_LIKELY(!(GCPtrMem & 15)))
9415 {
9416 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9417 uint64_t const u64Ret = *pu64Src;
9418 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9419 return u64Ret;
9420 }
9421
9422 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9423 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9424}
9425#endif
9426
9427
9428/**
9429 * Fetches a data tword.
9430 *
9431 * @returns Strict VBox status code.
9432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9433 * @param pr80Dst Where to return the tword.
9434 * @param iSegReg The index of the segment register to use for
9435 * this access. The base and limits are checked.
9436 * @param GCPtrMem The address of the guest memory.
9437 */
9438IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9439{
9440 /* The lazy approach for now... */
9441 PCRTFLOAT80U pr80Src;
9442 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9443 if (rc == VINF_SUCCESS)
9444 {
9445 *pr80Dst = *pr80Src;
9446 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9447 }
9448 return rc;
9449}
9450
9451
9452#ifdef IEM_WITH_SETJMP
9453/**
9454 * Fetches a data tword, longjmp on error.
9455 *
9456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9457 * @param pr80Dst Where to return the tword.
9458 * @param iSegReg The index of the segment register to use for
9459 * this access. The base and limits are checked.
9460 * @param GCPtrMem The address of the guest memory.
9461 */
9462DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9463{
9464 /* The lazy approach for now... */
9465 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9466 *pr80Dst = *pr80Src;
9467 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9468}
9469#endif
9470
9471
9472/**
9473 * Fetches a data dqword (double qword), generally SSE related.
9474 *
9475 * @returns Strict VBox status code.
9476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9477 * @param pu128Dst Where to return the dqword.
9478 * @param iSegReg The index of the segment register to use for
9479 * this access. The base and limits are checked.
9480 * @param GCPtrMem The address of the guest memory.
9481 */
9482IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9483{
9484 /* The lazy approach for now... */
9485 PCRTUINT128U pu128Src;
9486 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9487 if (rc == VINF_SUCCESS)
9488 {
9489 pu128Dst->au64[0] = pu128Src->au64[0];
9490 pu128Dst->au64[1] = pu128Src->au64[1];
9491 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9492 }
9493 return rc;
9494}
9495
9496
9497#ifdef IEM_WITH_SETJMP
9498/**
9499 * Fetches a data dqword (double qword), generally SSE related.
9500 *
9501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9502 * @param pu128Dst Where to return the dqword.
9503 * @param iSegReg The index of the segment register to use for
9504 * this access. The base and limits are checked.
9505 * @param GCPtrMem The address of the guest memory.
9506 */
9507IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9508{
9509 /* The lazy approach for now... */
9510 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9511 pu128Dst->au64[0] = pu128Src->au64[0];
9512 pu128Dst->au64[1] = pu128Src->au64[1];
9513 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9514}
9515#endif
9516
9517
9518/**
9519 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9520 * related.
9521 *
9522 * Raises \#GP(0) if not aligned.
9523 *
9524 * @returns Strict VBox status code.
9525 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9526 * @param pu128Dst Where to return the dqword.
9527 * @param iSegReg The index of the segment register to use for
9528 * this access. The base and limits are checked.
9529 * @param GCPtrMem The address of the guest memory.
9530 */
9531IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9532{
9533 /* The lazy approach for now... */
9534 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9535 if ( (GCPtrMem & 15)
9536 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9537 return iemRaiseGeneralProtectionFault0(pVCpu);
9538
9539 PCRTUINT128U pu128Src;
9540 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9541 if (rc == VINF_SUCCESS)
9542 {
9543 pu128Dst->au64[0] = pu128Src->au64[0];
9544 pu128Dst->au64[1] = pu128Src->au64[1];
9545 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9546 }
9547 return rc;
9548}
9549
9550
9551#ifdef IEM_WITH_SETJMP
9552/**
9553 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9554 * related, longjmp on error.
9555 *
9556 * Raises \#GP(0) if not aligned.
9557 *
9558 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9559 * @param pu128Dst Where to return the dqword.
9560 * @param iSegReg The index of the segment register to use for
9561 * this access. The base and limits are checked.
9562 * @param GCPtrMem The address of the guest memory.
9563 */
9564DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9565{
9566 /* The lazy approach for now... */
9567 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9568 if ( (GCPtrMem & 15) == 0
9569 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9570 {
9571 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9572 pu128Dst->au64[0] = pu128Src->au64[0];
9573 pu128Dst->au64[1] = pu128Src->au64[1];
9574 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9575 return;
9576 }
9577
9578 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9579 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9580}
9581#endif
9582
9583
9584/**
9585 * Fetches a data oword (octo word), generally AVX related.
9586 *
9587 * @returns Strict VBox status code.
9588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9589 * @param pu256Dst Where to return the oword.
9590 * @param iSegReg The index of the segment register to use for
9591 * this access. The base and limits are checked.
9592 * @param GCPtrMem The address of the guest memory.
9593 */
9594IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9595{
9596 /* The lazy approach for now... */
9597 PCRTUINT256U pu256Src;
9598 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9599 if (rc == VINF_SUCCESS)
9600 {
9601 pu256Dst->au64[0] = pu256Src->au64[0];
9602 pu256Dst->au64[1] = pu256Src->au64[1];
9603 pu256Dst->au64[2] = pu256Src->au64[2];
9604 pu256Dst->au64[3] = pu256Src->au64[3];
9605 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9606 }
9607 return rc;
9608}
9609
9610
9611#ifdef IEM_WITH_SETJMP
9612/**
9613 * Fetches a data oword (octo word), generally AVX related.
9614 *
9615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9616 * @param pu256Dst Where to return the oword.
9617 * @param iSegReg The index of the segment register to use for
9618 * this access. The base and limits are checked.
9619 * @param GCPtrMem The address of the guest memory.
9620 */
9621IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9622{
9623 /* The lazy approach for now... */
9624 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9625 pu256Dst->au64[0] = pu256Src->au64[0];
9626 pu256Dst->au64[1] = pu256Src->au64[1];
9627 pu256Dst->au64[2] = pu256Src->au64[2];
9628 pu256Dst->au64[3] = pu256Src->au64[3];
9629 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9630}
9631#endif
9632
9633
9634/**
9635 * Fetches a data oword (octo word) at an aligned address, generally AVX
9636 * related.
9637 *
9638 * Raises \#GP(0) if not aligned.
9639 *
9640 * @returns Strict VBox status code.
9641 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9642 * @param pu256Dst Where to return the oword.
9643 * @param iSegReg The index of the segment register to use for
9644 * this access. The base and limits are checked.
9645 * @param GCPtrMem The address of the guest memory.
9646 */
9647IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9648{
9649 /* The lazy approach for now... */
9650 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9651 if (GCPtrMem & 31)
9652 return iemRaiseGeneralProtectionFault0(pVCpu);
9653
9654 PCRTUINT256U pu256Src;
9655 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9656 if (rc == VINF_SUCCESS)
9657 {
9658 pu256Dst->au64[0] = pu256Src->au64[0];
9659 pu256Dst->au64[1] = pu256Src->au64[1];
9660 pu256Dst->au64[2] = pu256Src->au64[2];
9661 pu256Dst->au64[3] = pu256Src->au64[3];
9662 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9663 }
9664 return rc;
9665}
9666
9667
9668#ifdef IEM_WITH_SETJMP
9669/**
9670 * Fetches a data oword (octo word) at an aligned address, generally AVX
9671 * related, longjmp on error.
9672 *
9673 * Raises \#GP(0) if not aligned.
9674 *
9675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9676 * @param pu256Dst Where to return the oword.
9677 * @param iSegReg The index of the segment register to use for
9678 * this access. The base and limits are checked.
9679 * @param GCPtrMem The address of the guest memory.
9680 */
9681DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9682{
9683 /* The lazy approach for now... */
9684 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9685 if ((GCPtrMem & 31) == 0)
9686 {
9687 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9688 pu256Dst->au64[0] = pu256Src->au64[0];
9689 pu256Dst->au64[1] = pu256Src->au64[1];
9690 pu256Dst->au64[2] = pu256Src->au64[2];
9691 pu256Dst->au64[3] = pu256Src->au64[3];
9692 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9693 return;
9694 }
9695
9696 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9697 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9698}
9699#endif
9700
9701
9702
9703/**
9704 * Fetches a descriptor register (lgdt, lidt).
9705 *
9706 * @returns Strict VBox status code.
9707 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9708 * @param pcbLimit Where to return the limit.
9709 * @param pGCPtrBase Where to return the base.
9710 * @param iSegReg The index of the segment register to use for
9711 * this access. The base and limits are checked.
9712 * @param GCPtrMem The address of the guest memory.
9713 * @param enmOpSize The effective operand size.
9714 */
9715IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9716 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9717{
9718 /*
9719 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9720 * little special:
9721 * - The two reads are done separately.
9722 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9723 * - We suspect the 386 to actually commit the limit before the base in
9724 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9725 * don't try to emulate this eccentric behavior, because it's not well
9726 * enough understood and rather hard to trigger.
9727 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9728 */
9729 VBOXSTRICTRC rcStrict;
9730 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9731 {
9732 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9733 if (rcStrict == VINF_SUCCESS)
9734 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9735 }
9736 else
9737 {
9738 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
9739 if (enmOpSize == IEMMODE_32BIT)
9740 {
9741 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9742 {
9743 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9744 if (rcStrict == VINF_SUCCESS)
9745 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9746 }
9747 else
9748 {
9749 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9750 if (rcStrict == VINF_SUCCESS)
9751 {
9752 *pcbLimit = (uint16_t)uTmp;
9753 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9754 }
9755 }
9756 if (rcStrict == VINF_SUCCESS)
9757 *pGCPtrBase = uTmp;
9758 }
9759 else
9760 {
9761 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9762 if (rcStrict == VINF_SUCCESS)
9763 {
9764 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9765 if (rcStrict == VINF_SUCCESS)
9766 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9767 }
9768 }
9769 }
9770 return rcStrict;
9771}
9772
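/*
 * Usage sketch for iemMemFetchDataXdtr above (hypothetical helper, for
 * documentation only; the real LGDT/LIDT emulation lives elsewhere in IEM).
 * Note that with a 16-bit effective operand size the returned base is
 * already masked to 24 bits, so the caller needs no extra masking.
 */
#if 0 /* documentation sample, not compiled */
IEM_STATIC VBOXSTRICTRC iemExampleFetchXdtrOperand(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, IEMMODE enmOpSize)
{
    uint16_t cbLimit   = 0;
    RTGCPTR  GCPtrBase = 0;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iSegReg, GCPtrMem, enmOpSize);
    if (rcStrict == VINF_SUCCESS)
        Log(("example: descriptor table operand: base=%RGv limit=%#x\n", GCPtrBase, cbLimit));
    return rcStrict;
}
#endif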
9773
9774
9775/**
9776 * Stores a data byte.
9777 *
9778 * @returns Strict VBox status code.
9779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9780 * @param iSegReg The index of the segment register to use for
9781 * this access. The base and limits are checked.
9782 * @param GCPtrMem The address of the guest memory.
9783 * @param u8Value The value to store.
9784 */
9785IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9786{
9787 /* The lazy approach for now... */
9788 uint8_t *pu8Dst;
9789 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9790 if (rc == VINF_SUCCESS)
9791 {
9792 *pu8Dst = u8Value;
9793 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9794 }
9795 return rc;
9796}
9797
9798
9799#ifdef IEM_WITH_SETJMP
9800/**
9801 * Stores a data byte, longjmp on error.
9802 *
9803 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9804 * @param iSegReg The index of the segment register to use for
9805 * this access. The base and limits are checked.
9806 * @param GCPtrMem The address of the guest memory.
9807 * @param u8Value The value to store.
9808 */
9809IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9810{
9811 /* The lazy approach for now... */
9812 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9813 *pu8Dst = u8Value;
9814 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9815}
9816#endif
9817
9818
9819/**
9820 * Stores a data word.
9821 *
9822 * @returns Strict VBox status code.
9823 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9824 * @param iSegReg The index of the segment register to use for
9825 * this access. The base and limits are checked.
9826 * @param GCPtrMem The address of the guest memory.
9827 * @param u16Value The value to store.
9828 */
9829IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9830{
9831 /* The lazy approach for now... */
9832 uint16_t *pu16Dst;
9833 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9834 if (rc == VINF_SUCCESS)
9835 {
9836 *pu16Dst = u16Value;
9837 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9838 }
9839 return rc;
9840}
9841
9842
9843#ifdef IEM_WITH_SETJMP
9844/**
9845 * Stores a data word, longjmp on error.
9846 *
9847 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9848 * @param iSegReg The index of the segment register to use for
9849 * this access. The base and limits are checked.
9850 * @param GCPtrMem The address of the guest memory.
9851 * @param u16Value The value to store.
9852 */
9853IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9854{
9855 /* The lazy approach for now... */
9856 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9857 *pu16Dst = u16Value;
9858 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9859}
9860#endif
9861
9862
9863/**
9864 * Stores a data dword.
9865 *
9866 * @returns Strict VBox status code.
9867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9868 * @param iSegReg The index of the segment register to use for
9869 * this access. The base and limits are checked.
9870 * @param GCPtrMem The address of the guest memory.
9871 * @param u32Value The value to store.
9872 */
9873IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9874{
9875 /* The lazy approach for now... */
9876 uint32_t *pu32Dst;
9877 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9878 if (rc == VINF_SUCCESS)
9879 {
9880 *pu32Dst = u32Value;
9881 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9882 }
9883 return rc;
9884}
9885
9886
9887#ifdef IEM_WITH_SETJMP
9888/**
9889 * Stores a data dword, longjmp on error.
9890 *
9892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9893 * @param iSegReg The index of the segment register to use for
9894 * this access. The base and limits are checked.
9895 * @param GCPtrMem The address of the guest memory.
9896 * @param u32Value The value to store.
9897 */
9898IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9899{
9900 /* The lazy approach for now... */
9901 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9902 *pu32Dst = u32Value;
9903 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9904}
9905#endif
9906
9907
9908/**
9909 * Stores a data qword.
9910 *
9911 * @returns Strict VBox status code.
9912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9913 * @param iSegReg The index of the segment register to use for
9914 * this access. The base and limits are checked.
9915 * @param GCPtrMem The address of the guest memory.
9916 * @param u64Value The value to store.
9917 */
9918IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9919{
9920 /* The lazy approach for now... */
9921 uint64_t *pu64Dst;
9922 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9923 if (rc == VINF_SUCCESS)
9924 {
9925 *pu64Dst = u64Value;
9926 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9927 }
9928 return rc;
9929}
9930
9931
9932#ifdef IEM_WITH_SETJMP
9933/**
9934 * Stores a data qword, longjmp on error.
9935 *
9936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9937 * @param iSegReg The index of the segment register to use for
9938 * this access. The base and limits are checked.
9939 * @param GCPtrMem The address of the guest memory.
9940 * @param u64Value The value to store.
9941 */
9942IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9943{
9944 /* The lazy approach for now... */
9945 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9946 *pu64Dst = u64Value;
9947 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9948}
9949#endif
9950
9951
9952/**
9953 * Stores a data dqword.
9954 *
9955 * @returns Strict VBox status code.
9956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9957 * @param iSegReg The index of the segment register to use for
9958 * this access. The base and limits are checked.
9959 * @param GCPtrMem The address of the guest memory.
9960 * @param u128Value The value to store.
9961 */
9962IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9963{
9964 /* The lazy approach for now... */
9965 PRTUINT128U pu128Dst;
9966 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9967 if (rc == VINF_SUCCESS)
9968 {
9969 pu128Dst->au64[0] = u128Value.au64[0];
9970 pu128Dst->au64[1] = u128Value.au64[1];
9971 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9972 }
9973 return rc;
9974}
9975
9976
9977#ifdef IEM_WITH_SETJMP
9978/**
9979 * Stores a data dqword, longjmp on error.
9980 *
9981 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9982 * @param iSegReg The index of the segment register to use for
9983 * this access. The base and limits are checked.
9984 * @param GCPtrMem The address of the guest memory.
9985 * @param u128Value The value to store.
9986 */
9987IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
9988{
9989 /* The lazy approach for now... */
9990 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9991 pu128Dst->au64[0] = u128Value.au64[0];
9992 pu128Dst->au64[1] = u128Value.au64[1];
9993 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
9994}
9995#endif
9996
9997
9998/**
9999 * Stores a data dqword, SSE aligned.
10000 *
10001 * @returns Strict VBox status code.
10002 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10003 * @param iSegReg The index of the segment register to use for
10004 * this access. The base and limits are checked.
10005 * @param GCPtrMem The address of the guest memory.
10006 * @param u128Value The value to store.
10007 */
10008IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10009{
10010 /* The lazy approach for now... */
10011 if ( (GCPtrMem & 15)
10012 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10013 return iemRaiseGeneralProtectionFault0(pVCpu);
10014
10015 PRTUINT128U pu128Dst;
10016 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10017 if (rc == VINF_SUCCESS)
10018 {
10019 pu128Dst->au64[0] = u128Value.au64[0];
10020 pu128Dst->au64[1] = u128Value.au64[1];
10021 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10022 }
10023 return rc;
10024}
10025
10026
10027#ifdef IEM_WITH_SETJMP
10028/**
10029 * Stores a data dqword, SSE aligned, longjmp on error.
10030 *
10032 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10033 * @param iSegReg The index of the segment register to use for
10034 * this access. The base and limits are checked.
10035 * @param GCPtrMem The address of the guest memory.
10036 * @param u128Value The value to store.
10037 */
10038DECL_NO_INLINE(IEM_STATIC, void)
10039iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10040{
10041 /* The lazy approach for now... */
10042 if ( (GCPtrMem & 15) == 0
10043 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10044 {
10045 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10046 pu128Dst->au64[0] = u128Value.au64[0];
10047 pu128Dst->au64[1] = u128Value.au64[1];
10048 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10049 return;
10050 }
10051
10052 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10053 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10054}
10055#endif
10056
10057
10058/**
10059 * Stores a data oword (octo word), generally AVX related.
10060 *
10061 * @returns Strict VBox status code.
10062 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10063 * @param iSegReg The index of the segment register to use for
10064 * this access. The base and limits are checked.
10065 * @param GCPtrMem The address of the guest memory.
10066 * @param pu256Value Pointer to the value to store.
10067 */
10068IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10069{
10070 /* The lazy approach for now... */
10071 PRTUINT256U pu256Dst;
10072 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10073 if (rc == VINF_SUCCESS)
10074 {
10075 pu256Dst->au64[0] = pu256Value->au64[0];
10076 pu256Dst->au64[1] = pu256Value->au64[1];
10077 pu256Dst->au64[2] = pu256Value->au64[2];
10078 pu256Dst->au64[3] = pu256Value->au64[3];
10079 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10080 }
10081 return rc;
10082}
10083
10084
10085#ifdef IEM_WITH_SETJMP
10086/**
10087 * Stores a data oword (octo word), longjmp on error.
10088 *
10089 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10090 * @param iSegReg The index of the segment register to use for
10091 * this access. The base and limits are checked.
10092 * @param GCPtrMem The address of the guest memory.
10093 * @param pu256Value Pointer to the value to store.
10094 */
10095IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10096{
10097 /* The lazy approach for now... */
10098 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10099 pu256Dst->au64[0] = pu256Value->au64[0];
10100 pu256Dst->au64[1] = pu256Value->au64[1];
10101 pu256Dst->au64[2] = pu256Value->au64[2];
10102 pu256Dst->au64[3] = pu256Value->au64[3];
10103 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10104}
10105#endif
10106
10107
10108/**
10109 * Stores a data oword (octo word), AVX aligned.
10110 *
10111 * @returns Strict VBox status code.
10112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10113 * @param iSegReg The index of the segment register to use for
10114 * this access. The base and limits are checked.
10115 * @param GCPtrMem The address of the guest memory.
10116 * @param pu256Value Pointer to the value to store.
10117 */
10118IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10119{
10120 /* The lazy approach for now... */
10121 if (GCPtrMem & 31)
10122 return iemRaiseGeneralProtectionFault0(pVCpu);
10123
10124 PRTUINT256U pu256Dst;
10125 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10126 if (rc == VINF_SUCCESS)
10127 {
10128 pu256Dst->au64[0] = pu256Value->au64[0];
10129 pu256Dst->au64[1] = pu256Value->au64[1];
10130 pu256Dst->au64[2] = pu256Value->au64[2];
10131 pu256Dst->au64[3] = pu256Value->au64[3];
10132 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10133 }
10134 return rc;
10135}
10136
10137
10138#ifdef IEM_WITH_SETJMP
10139/**
10140 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10141 *
10143 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10144 * @param iSegReg The index of the segment register to use for
10145 * this access. The base and limits are checked.
10146 * @param GCPtrMem The address of the guest memory.
10147 * @param pu256Value Pointer to the value to store.
10148 */
10149DECL_NO_INLINE(IEM_STATIC, void)
10150iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10151{
10152 /* The lazy approach for now... */
10153 if ((GCPtrMem & 31) == 0)
10154 {
10155 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10156 pu256Dst->au64[0] = pu256Value->au64[0];
10157 pu256Dst->au64[1] = pu256Value->au64[1];
10158 pu256Dst->au64[2] = pu256Value->au64[2];
10159 pu256Dst->au64[3] = pu256Value->au64[3];
10160 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10161 return;
10162 }
10163
10164 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10165 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10166}
10167#endif
10168
10169
10170/**
10171 * Stores a descriptor register (sgdt, sidt).
10172 *
10173 * @returns Strict VBox status code.
10174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10175 * @param cbLimit The limit.
10176 * @param GCPtrBase The base address.
10177 * @param iSegReg The index of the segment register to use for
10178 * this access. The base and limits are checked.
10179 * @param GCPtrMem The address of the guest memory.
10180 */
10181IEM_STATIC VBOXSTRICTRC
10182iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10183{
10184 /*
10185 * The SIDT and SGDT instructions actually store the data using two
10186 * independent writes. The instructions do not respond to opsize prefixes.
10187 */
10188 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10189 if (rcStrict == VINF_SUCCESS)
10190 {
10191 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10192 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10193 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10194 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10195 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10196 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10197 else
10198 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10199 }
10200 return rcStrict;
10201}
10202
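/*
 * Usage sketch for iemMemStoreDataXdtr above (hypothetical helper, for
 * documentation only; the real SGDT/SIDT emulation lives elsewhere in IEM).
 * The two independent writes and the 286 high-byte quirk are handled inside
 * iemMemStoreDataXdtr itself, so a caller only supplies limit and base.
 */
#if 0 /* documentation sample, not compiled */
IEM_STATIC VBOXSTRICTRC iemExampleStoreGdtrOperand(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
{
    IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR);
    return iemMemStoreDataXdtr(pVCpu, pVCpu->cpum.GstCtx.gdtr.cbGdt, pVCpu->cpum.GstCtx.gdtr.pGdt,
                               iSegReg, GCPtrMem);
}
#endif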
10203
10204/**
10205 * Pushes a word onto the stack.
10206 *
10207 * @returns Strict VBox status code.
10208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10209 * @param u16Value The value to push.
10210 */
10211IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10212{
10213 /* Decrement the stack pointer. */
10214 uint64_t uNewRsp;
10215 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10216
10217 /* Write the word the lazy way. */
10218 uint16_t *pu16Dst;
10219 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10220 if (rc == VINF_SUCCESS)
10221 {
10222 *pu16Dst = u16Value;
10223 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10224 }
10225
10226 /* Commit the new RSP value unless an access handler made trouble. */
10227 if (rc == VINF_SUCCESS)
10228 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10229
10230 return rc;
10231}
10232
10233
10234/**
10235 * Pushes a dword onto the stack.
10236 *
10237 * @returns Strict VBox status code.
10238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10239 * @param u32Value The value to push.
10240 */
10241IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10242{
10243 /* Decrement the stack pointer. */
10244 uint64_t uNewRsp;
10245 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10246
10247 /* Write the dword the lazy way. */
10248 uint32_t *pu32Dst;
10249 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10250 if (rc == VINF_SUCCESS)
10251 {
10252 *pu32Dst = u32Value;
10253 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10254 }
10255
10256 /* Commit the new RSP value unless an access handler made trouble. */
10257 if (rc == VINF_SUCCESS)
10258 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10259
10260 return rc;
10261}
10262
10263
10264/**
10265 * Pushes a dword segment register value onto the stack.
10266 *
10267 * @returns Strict VBox status code.
10268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10269 * @param u32Value The value to push.
10270 */
10271IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10272{
10273 /* Decrement the stack pointer. */
10274 uint64_t uNewRsp;
10275 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10276
10277 /* The Intel docs talk about zero extending the selector register
10278 value. My actual intel CPU here might be zero extending the value
10279 but it still only writes the lower word... */
10280 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10281 * happens when crossing a page boundary, is the high word checked
10282 * for write accessibility or not? Probably it is. What about segment limits?
10283 * It appears this behavior is also shared with trap error codes.
10284 *
10285 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10286 * ancient hardware when it actually did change. */
10287 uint16_t *pu16Dst;
10288 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10289 if (rc == VINF_SUCCESS)
10290 {
10291 *pu16Dst = (uint16_t)u32Value;
10292 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10293 }
10294
10295 /* Commit the new RSP value unless an access handler made trouble. */
10296 if (rc == VINF_SUCCESS)
10297 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10298
10299 return rc;
10300}
10301
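/*
 * Illustrative sketch of the guest visible effect implemented by
 * iemMemStackPushU32SReg above (assumed semantics, for documentation only):
 * a 32-bit segment register push allocates four bytes of stack, but only the
 * low word is written, so the high word keeps whatever was there before.
 */
#if 0 /* documentation sample, not compiled */
static void iemExamplePushSRegStackBytes(uint8_t *pbNewStackTop, uint16_t uSel)
{
    /* Bytes 0..1 receive the selector value... */
    pbNewStackTop[0] = (uint8_t)uSel;
    pbNewStackTop[1] = (uint8_t)(uSel >> 8);
    /* ...while bytes 2..3 are mapped read-write but left unmodified. */
}
#endif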
10302
10303/**
10304 * Pushes a qword onto the stack.
10305 *
10306 * @returns Strict VBox status code.
10307 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10308 * @param u64Value The value to push.
10309 */
10310IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10311{
10312 /* Decrement the stack pointer. */
10313 uint64_t uNewRsp;
10314 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10315
10316 /* Write the qword the lazy way. */
10317 uint64_t *pu64Dst;
10318 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10319 if (rc == VINF_SUCCESS)
10320 {
10321 *pu64Dst = u64Value;
10322 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10323 }
10324
10325 /* Commit the new RSP value unless an access handler made trouble. */
10326 if (rc == VINF_SUCCESS)
10327 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10328
10329 return rc;
10330}
10331
10332
10333/**
10334 * Pops a word from the stack.
10335 *
10336 * @returns Strict VBox status code.
10337 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10338 * @param pu16Value Where to store the popped value.
10339 */
10340IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10341{
10342 /* Increment the stack pointer. */
10343 uint64_t uNewRsp;
10344 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10345
10346 /* Fetch the word the lazy way. */
10347 uint16_t const *pu16Src;
10348 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10349 if (rc == VINF_SUCCESS)
10350 {
10351 *pu16Value = *pu16Src;
10352 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10353
10354 /* Commit the new RSP value. */
10355 if (rc == VINF_SUCCESS)
10356 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10357 }
10358
10359 return rc;
10360}
10361
10362
10363/**
10364 * Pops a dword from the stack.
10365 *
10366 * @returns Strict VBox status code.
10367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10368 * @param pu32Value Where to store the popped value.
10369 */
10370IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10371{
10372 /* Increment the stack pointer. */
10373 uint64_t uNewRsp;
10374 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10375
10376 /* Fetch the dword the lazy way. */
10377 uint32_t const *pu32Src;
10378 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10379 if (rc == VINF_SUCCESS)
10380 {
10381 *pu32Value = *pu32Src;
10382 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10383
10384 /* Commit the new RSP value. */
10385 if (rc == VINF_SUCCESS)
10386 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10387 }
10388
10389 return rc;
10390}
10391
10392
10393/**
10394 * Pops a qword from the stack.
10395 *
10396 * @returns Strict VBox status code.
10397 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10398 * @param pu64Value Where to store the popped value.
10399 */
10400IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10401{
10402 /* Increment the stack pointer. */
10403 uint64_t uNewRsp;
10404 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10405
10406 /* Fetch the qword the lazy way. */
10407 uint64_t const *pu64Src;
10408 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10409 if (rc == VINF_SUCCESS)
10410 {
10411 *pu64Value = *pu64Src;
10412 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10413
10414 /* Commit the new RSP value. */
10415 if (rc == VINF_SUCCESS)
10416 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10417 }
10418
10419 return rc;
10420}
10421
10422
10423/**
10424 * Pushes a word onto the stack, using a temporary stack pointer.
10425 *
10426 * @returns Strict VBox status code.
10427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10428 * @param u16Value The value to push.
10429 * @param pTmpRsp Pointer to the temporary stack pointer.
10430 */
10431IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10432{
10433 /* Decrement the stack pointer. */
10434 RTUINT64U NewRsp = *pTmpRsp;
10435 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10436
10437 /* Write the word the lazy way. */
10438 uint16_t *pu16Dst;
10439 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10440 if (rc == VINF_SUCCESS)
10441 {
10442 *pu16Dst = u16Value;
10443 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10444 }
10445
10446 /* Commit the new RSP value unless an access handler made trouble. */
10447 if (rc == VINF_SUCCESS)
10448 *pTmpRsp = NewRsp;
10449
10450 return rc;
10451}
10452
10453
10454/**
10455 * Pushes a dword onto the stack, using a temporary stack pointer.
10456 *
10457 * @returns Strict VBox status code.
10458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10459 * @param u32Value The value to push.
10460 * @param pTmpRsp Pointer to the temporary stack pointer.
10461 */
10462IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10463{
10464 /* Decrement the stack pointer. */
10465 RTUINT64U NewRsp = *pTmpRsp;
10466 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10467
10468 /* Write the dword the lazy way. */
10469 uint32_t *pu32Dst;
10470 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10471 if (rc == VINF_SUCCESS)
10472 {
10473 *pu32Dst = u32Value;
10474 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10475 }
10476
10477 /* Commit the new RSP value unless an access handler made trouble. */
10478 if (rc == VINF_SUCCESS)
10479 *pTmpRsp = NewRsp;
10480
10481 return rc;
10482}
10483
10484
10485/**
10486 * Pushes a qword onto the stack, using a temporary stack pointer.
10487 *
10488 * @returns Strict VBox status code.
10489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10490 * @param u64Value The value to push.
10491 * @param pTmpRsp Pointer to the temporary stack pointer.
10492 */
10493IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10494{
10495 /* Decrement the stack pointer. */
10496 RTUINT64U NewRsp = *pTmpRsp;
10497 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10498
10499 /* Write the qword the lazy way. */
10500 uint64_t *pu64Dst;
10501 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10502 if (rc == VINF_SUCCESS)
10503 {
10504 *pu64Dst = u64Value;
10505 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10506 }
10507
10508 /* Commit the new RSP value unless an access handler made trouble. */
10509 if (rc == VINF_SUCCESS)
10510 *pTmpRsp = NewRsp;
10511
10512 return rc;
10513}
10514
10515
10516/**
10517 * Pops a word from the stack, using a temporary stack pointer.
10518 *
10519 * @returns Strict VBox status code.
10520 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10521 * @param pu16Value Where to store the popped value.
10522 * @param pTmpRsp Pointer to the temporary stack pointer.
10523 */
10524IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10525{
10526 /* Increment the stack pointer. */
10527 RTUINT64U NewRsp = *pTmpRsp;
10528 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10529
10530 /* Fetch the word the lazy way. */
10531 uint16_t const *pu16Src;
10532 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10533 if (rc == VINF_SUCCESS)
10534 {
10535 *pu16Value = *pu16Src;
10536 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10537
10538 /* Commit the new RSP value. */
10539 if (rc == VINF_SUCCESS)
10540 *pTmpRsp = NewRsp;
10541 }
10542
10543 return rc;
10544}
10545
10546
10547/**
10548 * Pops a dword from the stack, using a temporary stack pointer.
10549 *
10550 * @returns Strict VBox status code.
10551 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10552 * @param pu32Value Where to store the popped value.
10553 * @param pTmpRsp Pointer to the temporary stack pointer.
10554 */
10555IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10556{
10557 /* Increment the stack pointer. */
10558 RTUINT64U NewRsp = *pTmpRsp;
10559 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10560
10561 /* Fetch the dword the lazy way. */
10562 uint32_t const *pu32Src;
10563 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10564 if (rc == VINF_SUCCESS)
10565 {
10566 *pu32Value = *pu32Src;
10567 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10568
10569 /* Commit the new RSP value. */
10570 if (rc == VINF_SUCCESS)
10571 *pTmpRsp = NewRsp;
10572 }
10573
10574 return rc;
10575}
10576
10577
10578/**
10579 * Pops a qword from the stack, using a temporary stack pointer.
10580 *
10581 * @returns Strict VBox status code.
10582 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10583 * @param pu64Value Where to store the popped value.
10584 * @param pTmpRsp Pointer to the temporary stack pointer.
10585 */
10586IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10587{
10588 /* Increment the stack pointer. */
10589 RTUINT64U NewRsp = *pTmpRsp;
10590 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10591
10592 /* Fetch the qword the lazy way. */
10593 uint64_t const *pu64Src;
10594 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10595 if (rcStrict == VINF_SUCCESS)
10596 {
10597 *pu64Value = *pu64Src;
10598 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10599
10600 /* Commit the new RSP value. */
10601 if (rcStrict == VINF_SUCCESS)
10602 *pTmpRsp = NewRsp;
10603 }
10604
10605 return rcStrict;
10606}
10607
10608
10609/**
10610 * Begin a special stack push (used by interrupts, exceptions and such).
10611 *
10612 * This will raise \#SS or \#PF if appropriate.
10613 *
10614 * @returns Strict VBox status code.
10615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10616 * @param cbMem The number of bytes to push onto the stack.
10617 * @param ppvMem Where to return the pointer to the stack memory.
10618 * As with the other memory functions this could be
10619 * direct access or bounce buffered access, so
10620 * don't commit the register until the commit call
10621 * succeeds.
10622 * @param puNewRsp Where to return the new RSP value. This must be
10623 * passed unchanged to
10624 * iemMemStackPushCommitSpecial().
10625 */
10626IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10627{
10628 Assert(cbMem < UINT8_MAX);
10629 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10630 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10631}
10632
10633
10634/**
10635 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10636 *
10637 * This will update the rSP.
10638 *
10639 * @returns Strict VBox status code.
10640 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10641 * @param pvMem The pointer returned by
10642 * iemMemStackPushBeginSpecial().
10643 * @param uNewRsp The new RSP value returned by
10644 * iemMemStackPushBeginSpecial().
10645 */
10646IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10647{
10648 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10649 if (rcStrict == VINF_SUCCESS)
10650 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10651 return rcStrict;
10652}
10653
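/*
 * Usage sketch for the special stack push API above (hypothetical fragment,
 * for documentation only; the real callers are the exception/interrupt
 * dispatch routines in this file).  The key point is that RSP is only
 * committed by iemMemStackPushCommitSpecial once the buffer has been filled.
 */
#if 0 /* documentation sample, not compiled */
IEM_STATIC VBOXSTRICTRC iemExamplePushThreeDwords(PVMCPU pVCpu, uint32_t const *pau32Src)
{
    uint32_t *pau32Frame;
    uint64_t  uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
                                                        (void **)&pau32Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau32Frame[0] = pau32Src[0];    /* arbitrary example payload */
    pau32Frame[1] = pau32Src[1];
    pau32Frame[2] = pau32Src[2];
    return iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);
}
#endif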
10654
10655/**
10656 * Begin a special stack pop (used by iret, retf and such).
10657 *
10658 * This will raise \#SS or \#PF if appropriate.
10659 *
10660 * @returns Strict VBox status code.
10661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10662 * @param cbMem The number of bytes to pop from the stack.
10663 * @param ppvMem Where to return the pointer to the stack memory.
10664 * @param puNewRsp Where to return the new RSP value. This must be
10665 * assigned to CPUMCTX::rsp manually some time
10666 * after iemMemStackPopDoneSpecial() has been
10667 * called.
10668 */
10669IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10670{
10671 Assert(cbMem < UINT8_MAX);
10672 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10673 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10674}
10675
10676
10677/**
10678 * Continue a special stack pop (used by iret and retf).
10679 *
10680 * This will raise \#SS or \#PF if appropriate.
10681 *
10682 * @returns Strict VBox status code.
10683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10684 * @param cbMem The number of bytes to pop from the stack.
10685 * @param ppvMem Where to return the pointer to the stack memory.
10686 * @param puNewRsp Where to return the new RSP value. This must be
10687 * assigned to CPUMCTX::rsp manually some time
10688 * after iemMemStackPopDoneSpecial() has been
10689 * called.
10690 */
10691IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10692{
10693 Assert(cbMem < UINT8_MAX);
10694 RTUINT64U NewRsp;
10695 NewRsp.u = *puNewRsp;
10696 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10697 *puNewRsp = NewRsp.u;
10698 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10699}
10700
10701
10702/**
10703 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10704 * iemMemStackPopContinueSpecial).
10705 *
10706 * The caller will manually commit the rSP.
10707 *
10708 * @returns Strict VBox status code.
10709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10710 * @param pvMem The pointer returned by
10711 * iemMemStackPopBeginSpecial() or
10712 * iemMemStackPopContinueSpecial().
10713 */
10714IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10715{
10716 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10717}
10718
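/*
 * Usage sketch for the special stack pop API above (hypothetical fragment,
 * for documentation only).  Unlike the push case, RSP is not committed by
 * iemMemStackPopDoneSpecial; the caller assigns the value returned in
 * *puNewRsp to pVCpu->cpum.GstCtx.rsp itself once it knows the operation
 * won't be rolled back.
 */
#if 0 /* documentation sample, not compiled */
IEM_STATIC VBOXSTRICTRC iemExamplePopThreeDwords(PVMCPU pVCpu, uint32_t *pau32Dst)
{
    uint32_t const *pau32Frame;
    uint64_t        uNewRsp;
    VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
                                                       (void const **)&pau32Frame, &uNewRsp);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;
    pau32Dst[0] = pau32Frame[0];
    pau32Dst[1] = pau32Frame[1];
    pau32Dst[2] = pau32Frame[2];
    rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = uNewRsp;   /* manual commit, as documented above */
    return rcStrict;
}
#endif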
10719
10720/**
10721 * Fetches a system table byte.
10722 *
10723 * @returns Strict VBox status code.
10724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10725 * @param pbDst Where to return the byte.
10726 * @param iSegReg The index of the segment register to use for
10727 * this access. The base and limits are checked.
10728 * @param GCPtrMem The address of the guest memory.
10729 */
10730IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10731{
10732 /* The lazy approach for now... */
10733 uint8_t const *pbSrc;
10734 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10735 if (rc == VINF_SUCCESS)
10736 {
10737 *pbDst = *pbSrc;
10738 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10739 }
10740 return rc;
10741}
10742
10743
10744/**
10745 * Fetches a system table word.
10746 *
10747 * @returns Strict VBox status code.
10748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10749 * @param pu16Dst Where to return the word.
10750 * @param iSegReg The index of the segment register to use for
10751 * this access. The base and limits are checked.
10752 * @param GCPtrMem The address of the guest memory.
10753 */
10754IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10755{
10756 /* The lazy approach for now... */
10757 uint16_t const *pu16Src;
10758 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10759 if (rc == VINF_SUCCESS)
10760 {
10761 *pu16Dst = *pu16Src;
10762 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10763 }
10764 return rc;
10765}
10766
10767
10768/**
10769 * Fetches a system table dword.
10770 *
10771 * @returns Strict VBox status code.
10772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10773 * @param pu32Dst Where to return the dword.
10774 * @param iSegReg The index of the segment register to use for
10775 * this access. The base and limits are checked.
10776 * @param GCPtrMem The address of the guest memory.
10777 */
10778IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10779{
10780 /* The lazy approach for now... */
10781 uint32_t const *pu32Src;
10782 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10783 if (rc == VINF_SUCCESS)
10784 {
10785 *pu32Dst = *pu32Src;
10786 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10787 }
10788 return rc;
10789}
10790
10791
10792/**
10793 * Fetches a system table qword.
10794 *
10795 * @returns Strict VBox status code.
10796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10797 * @param pu64Dst Where to return the qword.
10798 * @param iSegReg The index of the segment register to use for
10799 * this access. The base and limits are checked.
10800 * @param GCPtrMem The address of the guest memory.
10801 */
10802IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10803{
10804 /* The lazy approach for now... */
10805 uint64_t const *pu64Src;
10806 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10807 if (rc == VINF_SUCCESS)
10808 {
10809 *pu64Dst = *pu64Src;
10810 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10811 }
10812 return rc;
10813}
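/*
 * Illustrative sketch only: apart from the descriptor fetch below, callers
 * such as an IDT lookup can use these helpers directly, passing UINT8_MAX as
 * the segment register index for a flat system access. A gate read might look
 * roughly like this (GCPtrIdte being a hypothetical, caller-computed address):
 *
 *     uint64_t u64Gate;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &u64Gate, UINT8_MAX, GCPtrIdte);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */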
10814
10815
10816/**
10817 * Fetches a descriptor table entry with caller specified error code.
10818 *
10819 * @returns Strict VBox status code.
10820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10821 * @param pDesc Where to return the descriptor table entry.
10822 * @param uSel The selector which table entry to fetch.
10823 * @param uXcpt The exception to raise on table lookup error.
10824 * @param uErrorCode The error code associated with the exception.
10825 */
10826IEM_STATIC VBOXSTRICTRC
10827iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10828{
10829 AssertPtr(pDesc);
10830 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10831
10832 /** @todo did the 286 require all 8 bytes to be accessible? */
10833 /*
10834 * Get the selector table base and check bounds.
10835 */
10836 RTGCPTR GCPtrBase;
10837 if (uSel & X86_SEL_LDT)
10838 {
10839 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10840 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10841 {
10842 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10843 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10844 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10845 uErrorCode, 0);
10846 }
10847
10848 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10849 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10850 }
10851 else
10852 {
10853 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10854 {
10855 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10856 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10857 uErrorCode, 0);
10858 }
10859 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10860 }
10861
10862 /*
10863 * Read the legacy descriptor and maybe the long mode extensions if
10864 * required.
10865 */
10866 VBOXSTRICTRC rcStrict;
10867 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10868 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10869 else
10870 {
10871 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10872 if (rcStrict == VINF_SUCCESS)
10873 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10874 if (rcStrict == VINF_SUCCESS)
10875 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10876 if (rcStrict == VINF_SUCCESS)
10877 pDesc->Legacy.au16[3] = 0;
10878 else
10879 return rcStrict;
10880 }
10881
10882 if (rcStrict == VINF_SUCCESS)
10883 {
10884 if ( !IEM_IS_LONG_MODE(pVCpu)
10885 || pDesc->Legacy.Gen.u1DescType)
10886 pDesc->Long.au64[1] = 0;
10887 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10888 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10889 else
10890 {
10891 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10892 /** @todo is this the right exception? */
10893 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10894 }
10895 }
10896 return rcStrict;
10897}
10898
10899
10900/**
10901 * Fetches a descriptor table entry.
10902 *
10903 * @returns Strict VBox status code.
10904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10905 * @param pDesc Where to return the descriptor table entry.
10906 * @param uSel The selector which table entry to fetch.
10907 * @param uXcpt The exception to raise on table lookup error.
10908 */
10909IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10910{
10911 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10912}
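/*
 * Illustrative sketch only: a typical caller fetches the descriptor for a
 * selector and lets a failed table lookup raise the given exception with the
 * selector as the error code (uNewCs is a hypothetical local):
 *
 *     IEMSELDESC DescCs;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &DescCs, uNewCs, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 */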
10913
10914
10915/**
10916 * Fakes a long mode stack selector for SS = 0.
10917 *
10918 * @param pDescSs Where to return the fake stack descriptor.
10919 * @param uDpl The DPL we want.
10920 */
10921IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10922{
10923 pDescSs->Long.au64[0] = 0;
10924 pDescSs->Long.au64[1] = 0;
10925 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10926 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10927 pDescSs->Long.Gen.u2Dpl = uDpl;
10928 pDescSs->Long.Gen.u1Present = 1;
10929 pDescSs->Long.Gen.u1Long = 1;
10930}
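/*
 * Illustrative sketch only: code handling a NULL SS in 64-bit mode (where
 * SS=0 is architecturally valid) can substitute the fake descriptor before
 * running the usual stack checks (uNewCpl is a hypothetical local):
 *
 *     IEMSELDESC DescSs;
 *     iemMemFakeStackSelDesc(&DescSs, uNewCpl);
 */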
10931
10932
10933/**
10934 * Marks the selector descriptor as accessed (only non-system descriptors).
10935 *
10936 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
10937 * will therefore skip the limit checks.
10938 *
10939 * @returns Strict VBox status code.
10940 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10941 * @param uSel The selector.
10942 */
10943IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
10944{
10945 /*
10946 * Get the selector table base and calculate the entry address.
10947 */
10948 RTGCPTR GCPtr = uSel & X86_SEL_LDT
10949 ? pVCpu->cpum.GstCtx.ldtr.u64Base
10950 : pVCpu->cpum.GstCtx.gdtr.pGdt;
10951 GCPtr += uSel & X86_SEL_MASK;
10952
10953 /*
10954 * ASMAtomicBitSet will assert if the address is misaligned, so do some
10955 * ugly stuff to avoid this. This also makes sure the access is atomic
10956 * and more or less removes any question about 8-bit vs 32-bit accesses.
10957 */
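    /*
     * Worked example of the offsets used below: the accessed bit is bit 40 of
     * the 8-byte descriptor. In the aligned case we map the dword at offset 4
     * (GCPtr + 2 + 2), so the bit lands at 40 - 32 = 8 within that dword. In
     * the misaligned case the whole 8 bytes are mapped and the byte offset of
     * 3, 2 or 1 added to pu32 is compensated for in the bit index
     * (40 - 24, 40 - 16, 40 - 8).
     */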
10958 VBOXSTRICTRC rcStrict;
10959 uint32_t volatile *pu32;
10960 if ((GCPtr & 3) == 0)
10961 {
10962 /* The normal case, map the dword containing the accessed bit (bit 40). */
10963 GCPtr += 2 + 2;
10964 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10965 if (rcStrict != VINF_SUCCESS)
10966 return rcStrict;
10967 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
10968 }
10969 else
10970 {
10971 /* The misaligned GDT/LDT case, map the whole thing. */
10972 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
10973 if (rcStrict != VINF_SUCCESS)
10974 return rcStrict;
10975 switch ((uintptr_t)pu32 & 3)
10976 {
10977 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
10978 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
10979 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
10980 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
10981 }
10982 }
10983
10984 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
10985}
10986
10987/** @} */
10988
10989
10990/*
10991 * Include the C/C++ implementation of instruction.
10992 */
10993#include "IEMAllCImpl.cpp.h"
10994
10995
10996
10997/** @name "Microcode" macros.
10998 *
10999 * The idea is that we should be able to use the same code to interpret
11000 * instructions as well as to recompile them. Thus this obfuscation.
11001 *
11002 * @{
11003 */
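/*
 * Illustrative sketch only (hypothetical instruction body, not real decoder
 * code): an implementation of something like "mov ax, bx" built from these
 * blocks would read roughly as follows; X86_GREG_xAX/xBX are assumed to be
 * the usual general register indexes:
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xBX);
 *     IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */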
11004#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11005#define IEM_MC_END() }
11006#define IEM_MC_PAUSE() do {} while (0)
11007#define IEM_MC_CONTINUE() do {} while (0)
11008
11009/** Internal macro. */
11010#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11011 do \
11012 { \
11013 VBOXSTRICTRC rcStrict2 = a_Expr; \
11014 if (rcStrict2 != VINF_SUCCESS) \
11015 return rcStrict2; \
11016 } while (0)
11017
11018
11019#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11020#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11021#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11022#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11023#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11024#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11025#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11026#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11027#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11028 do { \
11029 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11030 return iemRaiseDeviceNotAvailable(pVCpu); \
11031 } while (0)
11032#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11033 do { \
11034 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11035 return iemRaiseDeviceNotAvailable(pVCpu); \
11036 } while (0)
11037#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11038 do { \
11039 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11040 return iemRaiseMathFault(pVCpu); \
11041 } while (0)
11042#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11043 do { \
11044 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11045 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11046 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11047 return iemRaiseUndefinedOpcode(pVCpu); \
11048 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11049 return iemRaiseDeviceNotAvailable(pVCpu); \
11050 } while (0)
11051#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11052 do { \
11053 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11054 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11055 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11056 return iemRaiseUndefinedOpcode(pVCpu); \
11057 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11058 return iemRaiseDeviceNotAvailable(pVCpu); \
11059 } while (0)
11060#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11061 do { \
11062 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11063 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11064 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11065 return iemRaiseUndefinedOpcode(pVCpu); \
11066 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11067 return iemRaiseDeviceNotAvailable(pVCpu); \
11068 } while (0)
11069#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11070 do { \
11071 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11072 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11073 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11074 return iemRaiseUndefinedOpcode(pVCpu); \
11075 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11076 return iemRaiseDeviceNotAvailable(pVCpu); \
11077 } while (0)
11078#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11079 do { \
11080 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11081 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11082 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11083 return iemRaiseUndefinedOpcode(pVCpu); \
11084 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11085 return iemRaiseDeviceNotAvailable(pVCpu); \
11086 } while (0)
11087#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11088 do { \
11089 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11090 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11091 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11092 return iemRaiseUndefinedOpcode(pVCpu); \
11093 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11094 return iemRaiseDeviceNotAvailable(pVCpu); \
11095 } while (0)
11096#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11097 do { \
11098 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11099 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11100 return iemRaiseUndefinedOpcode(pVCpu); \
11101 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11102 return iemRaiseDeviceNotAvailable(pVCpu); \
11103 } while (0)
11104#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11105 do { \
11106 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11107 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11108 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11109 return iemRaiseUndefinedOpcode(pVCpu); \
11110 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11111 return iemRaiseDeviceNotAvailable(pVCpu); \
11112 } while (0)
11113#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11114 do { \
11115 if (pVCpu->iem.s.uCpl != 0) \
11116 return iemRaiseGeneralProtectionFault0(pVCpu); \
11117 } while (0)
11118#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11119 do { \
11120 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11121 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11122 } while (0)
11123#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11124 do { \
11125 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11126 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11127 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11128 return iemRaiseUndefinedOpcode(pVCpu); \
11129 } while (0)
11130#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11131 do { \
11132 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11133 return iemRaiseGeneralProtectionFault0(pVCpu); \
11134 } while (0)
11135
11136
11137#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11138#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11139#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11140#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11141#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11142#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11143#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11144 uint32_t a_Name; \
11145 uint32_t *a_pName = &a_Name
11146#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11147 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11148
11149#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11150#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11151
11152#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11153#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11154#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11155#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11156#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11157#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11158#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11159#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11160#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11161#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11162#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11163#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11164#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11165#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11166#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11167#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11168#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11169#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11170 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11171 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11172 } while (0)
11173#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11174 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11175 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11176 } while (0)
11177#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11178 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11179 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11180 } while (0)
11181/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11182#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11183 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11184 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11185 } while (0)
11186#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11187 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11188 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11189 } while (0)
11190/** @note Not for IOPL or IF testing or modification. */
11191#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11192#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11193#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11194#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11195
11196#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11197#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11198#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11199#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11200#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11201#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11202#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11203#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11204#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11205#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11206/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11207#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11208 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11209 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11210 } while (0)
11211#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11212 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11213 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11214 } while (0)
11215#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11216 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11217
11218
11219#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11220#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11221/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11222 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11223#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11224#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
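/*
 * Illustrative sketch only, per the @todo above: a 32-bit read-modify-write
 * user of IEM_MC_REF_GREG_U32 is expected to clear the upper half on commit,
 * e.g. (the register index and local name are hypothetical):
 *
 *     IEM_MC_REF_GREG_U32(pu32Dst, iGReg);
 *     ... modify *pu32Dst ...
 *     IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 */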
11225/** @note Not for IOPL or IF testing or modification. */
11226#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11227
11228#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11229#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11230#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11231 do { \
11232 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11233 *pu32Reg += (a_u32Value); \
11234 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
11235 } while (0)
11236#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11237
11238#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11239#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11240#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11241 do { \
11242 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11243 *pu32Reg -= (a_u32Value); \
11244 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
11245 } while (0)
11246#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11247#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11248
11249#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11250#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11251#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11252#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11253#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11254#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11255#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11256
11257#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11258#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11259#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11260#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11261
11262#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11263#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11264#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11265
11266#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11267#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11268#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11269
11270#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11271#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11272#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11273
11274#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11275#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11276#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11277
11278#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11279
11280#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11281
11282#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11283#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11284#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11285 do { \
11286 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11287 *pu32Reg &= (a_u32Value); \
11288 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
11289 } while (0)
11290#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11291
11292#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11293#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11294#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11295 do { \
11296 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11297 *pu32Reg |= (a_u32Value); \
11298 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
11299 } while (0)
11300#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11301
11302
11303/** @note Not for IOPL or IF modification. */
11304#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11305/** @note Not for IOPL or IF modification. */
11306#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11307/** @note Not for IOPL or IF modification. */
11308#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11309
11310#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11311
11312/** Switches the FPU state to MMX mode (FSW.TOS=0, all tags valid, i.e. abridged FTW=0xff) if necessary. */
11313#define IEM_MC_FPU_TO_MMX_MODE() do { \
11314 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11315 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11316 } while (0)
11317
11318/** Switches the FPU state from MMX mode (all tags empty, i.e. abridged FTW=0). */
11319#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11320 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11321 } while (0)
11322
11323#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11324 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11325#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11326 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11327#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11328 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11329 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11330 } while (0)
11331#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11332 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11333 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11334 } while (0)
11335#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11336 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11337#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11338 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11339#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11340 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11341
11342#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11343 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11344 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11345 } while (0)
11346#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11347 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11348#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11349 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11350#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11351 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11352#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11353 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11354 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11355 } while (0)
11356#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11357 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11358#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11359 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11360 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11361 } while (0)
11362#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11363 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11364#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11365 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11366 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11367 } while (0)
11368#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11369 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11370#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11371 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11372#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11373 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11374#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11375 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11376#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11377 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11378 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11379 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11380 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11381 } while (0)
11382
11383#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11384 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11385 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11386 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11387 } while (0)
11388#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11389 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11390 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11391 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11392 } while (0)
11393#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11394 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11395 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11396 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11397 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11398 } while (0)
11399#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11400 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11401 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11402 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11403 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11404 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11405 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11406 } while (0)
11407
11408#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11409#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11410 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11411 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11412 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11413 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11414 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11415 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11416 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11417 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11418 } while (0)
11419#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11420 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11421 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11422 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11423 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11424 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11425 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11426 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11427 } while (0)
11428#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11429 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11430 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11431 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11432 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11433 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11434 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11435 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11436 } while (0)
11437#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11438 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11439 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11440 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11441 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11442 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11443 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11444 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11445 } while (0)
11446
11447#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11448 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11449#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11450 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11451#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11452 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11453#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11454 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11455 uintptr_t const iYRegTmp = (a_iYReg); \
11456 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11457 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11458 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11459 } while (0)
11460
11461#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11462 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11463 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11464 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11465 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11466 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11467 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11468 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11469 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11470 } while (0)
11471#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11472 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11473 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11474 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11475 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11476 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11477 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11478 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11479 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11480 } while (0)
11481#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11482 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11483 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11484 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11485 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11486 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11487 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11488 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11489 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11490 } while (0)
11491
11492#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11493 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11494 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11495 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11496 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11497 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11498 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11499 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11500 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11501 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11502 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11503 } while (0)
11504#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11505 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11506 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11507 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11508 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11509 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11510 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11511 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11512 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11513 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11514 } while (0)
11515#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11516 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11517 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11518 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11519 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11520 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11521 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11522 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11523 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11524 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11525 } while (0)
11526#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11527 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11528 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11529 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11530 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11531 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11532 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11533 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11534 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11535 } while (0)
11536
11537#ifndef IEM_WITH_SETJMP
11538# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11539 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11540# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11541 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11542# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11543 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11544#else
11545# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11546 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11547# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11548 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11549# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11550 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11551#endif
11552
11553#ifndef IEM_WITH_SETJMP
11554# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11555 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11556# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11557 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11558# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11559 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11560#else
11561# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11562 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11563# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11564 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11565# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11566 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11567#endif
11568
11569#ifndef IEM_WITH_SETJMP
11570# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11571 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11572# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11573 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11574# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11575 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11576#else
11577# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11578 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11579# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11580 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11581# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11582 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11583#endif
11584
11585#ifdef SOME_UNUSED_FUNCTION
11586# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11587 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11588#endif
11589
11590#ifndef IEM_WITH_SETJMP
11591# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11592 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11593# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11594 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11595# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11596 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11597# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11598 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11599#else
11600# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11601 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11602# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11603 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11604# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11605 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11606# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11607 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11608#endif
11609
11610#ifndef IEM_WITH_SETJMP
11611# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11612 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11613# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11614 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11615# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11616 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11617#else
11618# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11619 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11620# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11621 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11622# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11623 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11624#endif
11625
11626#ifndef IEM_WITH_SETJMP
11627# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11628 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11629# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11630 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11631#else
11632# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11633 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11634# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11635 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11636#endif
11637
11638#ifndef IEM_WITH_SETJMP
11639# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11640 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11641# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11642 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11643#else
11644# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11645 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11646# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11647 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11648#endif
11649
11650
11651
11652#ifndef IEM_WITH_SETJMP
11653# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11654 do { \
11655 uint8_t u8Tmp; \
11656 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11657 (a_u16Dst) = u8Tmp; \
11658 } while (0)
11659# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11660 do { \
11661 uint8_t u8Tmp; \
11662 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11663 (a_u32Dst) = u8Tmp; \
11664 } while (0)
11665# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11666 do { \
11667 uint8_t u8Tmp; \
11668 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11669 (a_u64Dst) = u8Tmp; \
11670 } while (0)
11671# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11672 do { \
11673 uint16_t u16Tmp; \
11674 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11675 (a_u32Dst) = u16Tmp; \
11676 } while (0)
11677# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11678 do { \
11679 uint16_t u16Tmp; \
11680 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11681 (a_u64Dst) = u16Tmp; \
11682 } while (0)
11683# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11684 do { \
11685 uint32_t u32Tmp; \
11686 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11687 (a_u64Dst) = u32Tmp; \
11688 } while (0)
11689#else /* IEM_WITH_SETJMP */
11690# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11691 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11692# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11693 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11694# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11695 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11696# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11697 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11698# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11699 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11700# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11701 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11702#endif /* IEM_WITH_SETJMP */
11703
11704#ifndef IEM_WITH_SETJMP
11705# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11706 do { \
11707 uint8_t u8Tmp; \
11708 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11709 (a_u16Dst) = (int8_t)u8Tmp; \
11710 } while (0)
11711# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11712 do { \
11713 uint8_t u8Tmp; \
11714 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11715 (a_u32Dst) = (int8_t)u8Tmp; \
11716 } while (0)
11717# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11718 do { \
11719 uint8_t u8Tmp; \
11720 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11721 (a_u64Dst) = (int8_t)u8Tmp; \
11722 } while (0)
11723# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11724 do { \
11725 uint16_t u16Tmp; \
11726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11727 (a_u32Dst) = (int16_t)u16Tmp; \
11728 } while (0)
11729# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11730 do { \
11731 uint16_t u16Tmp; \
11732 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11733 (a_u64Dst) = (int16_t)u16Tmp; \
11734 } while (0)
11735# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11736 do { \
11737 uint32_t u32Tmp; \
11738 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11739 (a_u64Dst) = (int32_t)u32Tmp; \
11740 } while (0)
11741#else /* IEM_WITH_SETJMP */
11742# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11743 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11744# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11745 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11746# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11747 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11748# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11749 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11750# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11751 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11752# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11753 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11754#endif /* IEM_WITH_SETJMP */
11755
11756#ifndef IEM_WITH_SETJMP
11757# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11758 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11759# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11760 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11761# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11762 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11763# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11764 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11765#else
11766# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11767 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11768# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11769 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11770# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11771 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11772# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11773 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11774#endif
11775
11776#ifndef IEM_WITH_SETJMP
11777# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11778 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11779# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11780 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11781# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11782 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11783# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11784 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11785#else
11786# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11787 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11788# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11789 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11790# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11791 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11792# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11793 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11794#endif
11795
11796#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11797#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11798#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11799#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11800#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11801#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11802#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11803 do { \
11804 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11805 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11806 } while (0)
11807
11808#ifndef IEM_WITH_SETJMP
11809# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11810 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11811# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11812 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11813#else
11814# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11815 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11816# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11817 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11818#endif
11819
11820#ifndef IEM_WITH_SETJMP
11821# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11822 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11823# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11824 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11825#else
11826# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11827 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11828# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11829 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11830#endif
11831
11832
11833#define IEM_MC_PUSH_U16(a_u16Value) \
11834 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11835#define IEM_MC_PUSH_U32(a_u32Value) \
11836 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11837#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11838 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11839#define IEM_MC_PUSH_U64(a_u64Value) \
11840 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11841
11842#define IEM_MC_POP_U16(a_pu16Value) \
11843 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11844#define IEM_MC_POP_U32(a_pu32Value) \
11845 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11846#define IEM_MC_POP_U64(a_pu64Value) \
11847 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11848
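/* Illustrative usage sketch for the stack push/pop wrappers above, assuming
 * the usual IEM_MC_BEGIN/IEM_MC_END scaffolding; xBX is just an example
 * source register:
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xBX);
 *     IEM_MC_PUSH_U16(u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */
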
11849/** Maps guest memory for direct or bounce buffered access.
11850 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11851 * @remarks May return.
11852 */
11853#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11854 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11855
11856/** Maps guest memory for direct or bounce buffered access.
11857 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11858 * @remarks May return.
11859 */
11860#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11861 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11862
11863/** Commits the memory and unmaps the guest memory.
11864 * @remarks May return.
11865 */
11866#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11867 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11868
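/* Illustrative usage sketch for IEM_MC_MEM_MAP / IEM_MC_MEM_COMMIT_AND_UNMAP,
 * loosely following the read-modify-write pattern of the arithmetic decoders;
 * note how the argument index passed to IEM_MC_MEM_MAP matches the IEM_MC_ARG
 * slot of the mapped pointer, and the same access flags are used for mapping
 * and committing:
 *
 *     IEM_MC_ARG(uint16_t *, pu16Dst, 0);
 *     IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *     IEM_MC_MEM_MAP(pu16Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *     (modify *pu16Dst here, e.g. via one of the IEM_MC_CALL_*_AIMPL_* wrappers below)
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu16Dst, IEM_ACCESS_DATA_RW);
 */
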
11869/** Commits the memory and unmaps the guest memory unless the FPU status word
11870 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11871 * would prevent the FPU store from taking place.
11872 *
11873 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11874 * store, while \#P will not.
11875 *
11876 * @remarks May in theory return - for now.
11877 */
11878#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11879 do { \
11880 if ( !(a_u16FSW & X86_FSW_ES) \
11881 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11882 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11883 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11884 } while (0)
11885
11886/** Calculate efficient address from R/M. */
11887#ifndef IEM_WITH_SETJMP
11888# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11889 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11890#else
11891# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11892 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11893#endif
11894
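/* Illustrative usage sketch: IEM_MC_CALC_RM_EFF_ADDR feeding one of the
 * IEM_MC_STORE_MEM_* macros above, roughly the shape of a "mov Ev,Gv" style
 * memory form; X86_GREG_xCX is just a placeholder source register:
 *
 *     IEM_MC_BEGIN(0, 2);
 *     IEM_MC_LOCAL(uint32_t, u32Value);
 *     IEM_MC_LOCAL(RTGCPTR,  GCPtrEffDst);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *     IEM_MC_FETCH_GREG_U32(u32Value, X86_GREG_xCX);
 *     IEM_MC_STORE_MEM_U32(pVCpu->iem.s.iEffSeg, GCPtrEffDst, u32Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */
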
11895#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11896#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11897#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11898#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11899#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11900#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11901#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11902
11903/**
11904 * Defers the rest of the instruction emulation to a C implementation routine
11905 * and returns, only taking the standard parameters.
11906 *
11907 * @param a_pfnCImpl The pointer to the C routine.
11908 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11909 */
11910#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11911
11912/**
11913 * Defers the rest of the instruction emulation to a C implementation routine and
11914 * returns, taking one argument in addition to the standard ones.
11915 *
11916 * @param a_pfnCImpl The pointer to the C routine.
11917 * @param a0 The argument.
11918 */
11919#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11920
11921/**
11922 * Defers the rest of the instruction emulation to a C implementation routine
11923 * and returns, taking two arguments in addition to the standard ones.
11924 *
11925 * @param a_pfnCImpl The pointer to the C routine.
11926 * @param a0 The first extra argument.
11927 * @param a1 The second extra argument.
11928 */
11929#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11930
11931/**
11932 * Defers the rest of the instruction emulation to a C implementation routine
11933 * and returns, taking three arguments in addition to the standard ones.
11934 *
11935 * @param a_pfnCImpl The pointer to the C routine.
11936 * @param a0 The first extra argument.
11937 * @param a1 The second extra argument.
11938 * @param a2 The third extra argument.
11939 */
11940#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11941
11942/**
11943 * Defers the rest of the instruction emulation to a C implementation routine
11944 * and returns, taking four arguments in addition to the standard ones.
11945 *
11946 * @param a_pfnCImpl The pointer to the C routine.
11947 * @param a0 The first extra argument.
11948 * @param a1 The second extra argument.
11949 * @param a2 The third extra argument.
11950 * @param a3 The fourth extra argument.
11951 */
11952#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
11953
11954/**
11955 * Defers the rest of the instruction emulation to a C implementation routine
11956 * and returns, taking five arguments in addition to the standard ones.
11957 *
11958 * @param a_pfnCImpl The pointer to the C routine.
11959 * @param a0 The first extra argument.
11960 * @param a1 The second extra argument.
11961 * @param a2 The third extra argument.
11962 * @param a3 The fourth extra argument.
11963 * @param a4 The fifth extra argument.
11964 */
11965#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
11966
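/* Illustrative sketch of a decoder tail handing the actual work to a C
 * implementation routine; iemCImpl_SomeWorker is a placeholder for an
 * IEM_CIMPL_DEF_2 style routine taking a segment index and an effective
 * address in addition to the standard parameters:
 *
 *     IEM_MC_BEGIN(2, 0);
 *     IEM_MC_ARG(uint8_t, iEffSeg,     0);
 *     IEM_MC_ARG(RTGCPTR, GCPtrEffSrc, 1);
 *     IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
 *     IEM_MC_ASSIGN(iEffSeg, pVCpu->iem.s.iEffSeg);
 *     IEM_MC_CALL_CIMPL_2(iemCImpl_SomeWorker, iEffSeg, GCPtrEffSrc);
 *     IEM_MC_END();
 */
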
11967/**
11968 * Defers the entire instruction emulation to a C implementation routine and
11969 * returns, only taking the standard parameters.
11970 *
11971 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11972 *
11973 * @param a_pfnCImpl The pointer to the C routine.
11974 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11975 */
11976#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11977
11978/**
11979 * Defers the entire instruction emulation to a C implementation routine and
11980 * returns, taking one argument in addition to the standard ones.
11981 *
11982 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11983 *
11984 * @param a_pfnCImpl The pointer to the C routine.
11985 * @param a0 The argument.
11986 */
11987#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11988
11989/**
11990 * Defers the entire instruction emulation to a C implementation routine and
11991 * returns, taking two arguments in addition to the standard ones.
11992 *
11993 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
11994 *
11995 * @param a_pfnCImpl The pointer to the C routine.
11996 * @param a0 The first extra argument.
11997 * @param a1 The second extra argument.
11998 */
11999#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12000
12001/**
12002 * Defers the entire instruction emulation to a C implementation routine and
12003 * returns, taking three arguments in addition to the standard ones.
12004 *
12005 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12006 *
12007 * @param a_pfnCImpl The pointer to the C routine.
12008 * @param a0 The first extra argument.
12009 * @param a1 The second extra argument.
12010 * @param a2 The third extra argument.
12011 */
12012#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12013
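/* Illustrative sketch: an instruction that is emulated entirely in C skips the
 * IEM_MC block altogether and simply returns the worker's status; the worker
 * name is a placeholder for an IEM_CIMPL_DEF_0 routine:
 *
 *     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *     return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_SomeWorker);
 */
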
12014/**
12015 * Calls an FPU assembly implementation taking one visible argument.
12016 *
12017 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12018 * @param a0 The first extra argument.
12019 */
12020#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12021 do { \
12022 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12023 } while (0)
12024
12025/**
12026 * Calls an FPU assembly implementation taking two visible arguments.
12027 *
12028 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12029 * @param a0 The first extra argument.
12030 * @param a1 The second extra argument.
12031 */
12032#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12033 do { \
12034 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12035 } while (0)
12036
12037/**
12038 * Calls an FPU assembly implementation taking three visible arguments.
12039 *
12040 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12041 * @param a0 The first extra argument.
12042 * @param a1 The second extra argument.
12043 * @param a2 The third extra argument.
12044 */
12045#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12046 do { \
12047 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12048 } while (0)
12049
12050#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12051 do { \
12052 (a_FpuData).FSW = (a_FSW); \
12053 (a_FpuData).r80Result = *(a_pr80Value); \
12054 } while (0)
12055
12056/** Pushes FPU result onto the stack. */
12057#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12058 iemFpuPushResult(pVCpu, &a_FpuData)
12059/** Pushes FPU result onto the stack and sets the FPUDP. */
12060#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12061 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12062
12063/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12064#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12065 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12066
12067/** Stores FPU result in a stack register. */
12068#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12069 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12070/** Stores FPU result in a stack register and pops the stack. */
12071#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12072 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12073/** Stores FPU result in a stack register and sets the FPUDP. */
12074#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12075 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12076/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12077 * stack. */
12078#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12079 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12080
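/* Illustrative sketch, condensed from the way two-operand FPU arithmetic
 * decoders combine the call and result-store macros (the usual maybe-raise
 * checks are omitted for brevity); iemAImpl_fadd_r80_by_r80 is the assumed
 * assembly worker for FADD:
 *
 *     IEM_MC_BEGIN(3, 1);
 *     IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *     IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *     IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
 *     IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1) {
 *         IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
 *         IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *     } IEM_MC_ELSE() {
 *         IEM_MC_FPU_STACK_UNDERFLOW(0);
 *     } IEM_MC_ENDIF();
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */
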
12081/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12082#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12083 iemFpuUpdateOpcodeAndIp(pVCpu)
12084/** Free a stack register (for FFREE and FFREEP). */
12085#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12086 iemFpuStackFree(pVCpu, a_iStReg)
12087/** Increment the FPU stack pointer. */
12088#define IEM_MC_FPU_STACK_INC_TOP() \
12089 iemFpuStackIncTop(pVCpu)
12090/** Decrement the FPU stack pointer. */
12091#define IEM_MC_FPU_STACK_DEC_TOP() \
12092 iemFpuStackDecTop(pVCpu)
12093
12094/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12095#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12096 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12097/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12098#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12099 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12100/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12101#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12102 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12103/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12104#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12105 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12106/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12107 * stack. */
12108#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12109 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12110/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12111#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12112 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12113
12114/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12115#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12116 iemFpuStackUnderflow(pVCpu, a_iStDst)
12117/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12118 * stack. */
12119#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12120 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12121/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12122 * FPUDS. */
12123#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12124 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12125/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12126 * FPUDS. Pops stack. */
12127#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12128 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12129/** Raises an FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12130 * stack twice. */
12131#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12132 iemFpuStackUnderflowThenPopPop(pVCpu)
12133/** Raises an FPU stack underflow exception for an instruction pushing a result
12134 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12135#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12136 iemFpuStackPushUnderflow(pVCpu)
12137/** Raises an FPU stack underflow exception for an instruction pushing a result
12138 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12139#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12140 iemFpuStackPushUnderflowTwo(pVCpu)
12141
12142/** Raises an FPU stack overflow exception as part of a push attempt. Sets
12143 * FPUIP, FPUCS and FOP. */
12144#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12145 iemFpuStackPushOverflow(pVCpu)
12146/** Raises an FPU stack overflow exception as part of a push attempt. Sets
12147 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12148#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12149 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12150/** Prepares for using the FPU state.
12151 * Ensures that we can use the host FPU in the current context (RC+R0).
12152 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12153#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12154/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12155#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12156/** Actualizes the guest FPU state so it can be accessed and modified. */
12157#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12158
12159/** Prepares for using the SSE state.
12160 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12161 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12162#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12163/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12164#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12165/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12166#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12167
12168/** Prepares for using the AVX state.
12169 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12170 * Ensures the guest AVX state in the CPUMCTX is up to date.
12171 * @note This will include the AVX512 state too when support for it is added
12172 *       due to the zero-extending feature of VEX instructions.
12173#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12174/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12175#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12176/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12177#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12178
12179/**
12180 * Calls an MMX assembly implementation taking two visible arguments.
12181 *
12182 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12183 * @param a0 The first extra argument.
12184 * @param a1 The second extra argument.
12185 */
12186#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12187 do { \
12188 IEM_MC_PREPARE_FPU_USAGE(); \
12189 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12190 } while (0)
12191
12192/**
12193 * Calls an MMX assembly implementation taking three visible arguments.
12194 *
12195 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12196 * @param a0 The first extra argument.
12197 * @param a1 The second extra argument.
12198 * @param a2 The third extra argument.
12199 */
12200#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12201 do { \
12202 IEM_MC_PREPARE_FPU_USAGE(); \
12203 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12204 } while (0)
12205
12206
12207/**
12208 * Calls an SSE assembly implementation taking two visible arguments.
12209 *
12210 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12211 * @param a0 The first extra argument.
12212 * @param a1 The second extra argument.
12213 */
12214#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12215 do { \
12216 IEM_MC_PREPARE_SSE_USAGE(); \
12217 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12218 } while (0)
12219
12220/**
12221 * Calls an SSE assembly implementation taking three visible arguments.
12222 *
12223 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12224 * @param a0 The first extra argument.
12225 * @param a1 The second extra argument.
12226 * @param a2 The third extra argument.
12227 */
12228#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12229 do { \
12230 IEM_MC_PREPARE_SSE_USAGE(); \
12231 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12232 } while (0)
12233
12234
12235/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12236 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12237#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12238 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12239
12240/**
12241 * Calls an AVX assembly implementation taking two visible arguments.
12242 *
12243 * There is one implicit zeroth argument, a pointer to the extended state.
12244 *
12245 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12246 * @param a1 The first extra argument.
12247 * @param a2 The second extra argument.
12248 */
12249#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12250 do { \
12251 IEM_MC_PREPARE_AVX_USAGE(); \
12252 a_pfnAImpl(pXState, (a1), (a2)); \
12253 } while (0)
12254
12255/**
12256 * Calls an AVX assembly implementation taking three visible arguments.
12257 *
12258 * There is one implicit zeroth argument, a pointer to the extended state.
12259 *
12260 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12261 * @param a1 The first extra argument.
12262 * @param a2 The second extra argument.
12263 * @param a3 The third extra argument.
12264 */
12265#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12266 do { \
12267 IEM_MC_PREPARE_AVX_USAGE(); \
12268 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12269 } while (0)
12270
12271/** @note Not for IOPL or IF testing. */
12272#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12273/** @note Not for IOPL or IF testing. */
12274#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12275/** @note Not for IOPL or IF testing. */
12276#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12277/** @note Not for IOPL or IF testing. */
12278#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12279/** @note Not for IOPL or IF testing. */
12280#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12281 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12282 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12283/** @note Not for IOPL or IF testing. */
12284#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12285 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12286 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12287/** @note Not for IOPL or IF testing. */
12288#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12289 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12290 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12291 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12292/** @note Not for IOPL or IF testing. */
12293#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12294 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12295 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12296 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12297#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12298#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12299#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12300/** @note Not for IOPL or IF testing. */
12301#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12302 if ( pVCpu->cpum.GstCtx.cx != 0 \
12303 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12304/** @note Not for IOPL or IF testing. */
12305#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12306 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12307 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12308/** @note Not for IOPL or IF testing. */
12309#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12310 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12311 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12312/** @note Not for IOPL or IF testing. */
12313#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12314 if ( pVCpu->cpum.GstCtx.cx != 0 \
12315 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12316/** @note Not for IOPL or IF testing. */
12317#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12318 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12319 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12320/** @note Not for IOPL or IF testing. */
12321#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12322 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12323 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12324#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12325#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12326
12327#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12328 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12329#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12330 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12331#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12332 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12333#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12334 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12335#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12336 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12337#define IEM_MC_IF_FCW_IM() \
12338 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12339
12340#define IEM_MC_ELSE() } else {
12341#define IEM_MC_ENDIF() } do {} while (0)
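
/* Illustrative sketch of the conditional blocks above as a Jcc-style decoder
 * would use them; i8Imm is assumed to have been fetched from the instruction
 * stream beforehand with IEM_OPCODE_GET_NEXT_S8:
 *
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     } IEM_MC_ELSE() {
 *         IEM_MC_ADVANCE_RIP();
 *     } IEM_MC_ENDIF();
 *     IEM_MC_END();
 */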
12342
12343/** @} */
12344
12345
12346/** @name Opcode Debug Helpers.
12347 * @{
12348 */
12349#ifdef VBOX_WITH_STATISTICS
12350# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12351#else
12352# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12353#endif
12354
12355#ifdef DEBUG
12356# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12357 do { \
12358 IEMOP_INC_STATS(a_Stats); \
12359 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12360 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12361 } while (0)
12362
12363# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12364 do { \
12365 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12366 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12367 (void)RT_CONCAT(OP_,a_Upper); \
12368 (void)(a_fDisHints); \
12369 (void)(a_fIemHints); \
12370 } while (0)
12371
12372# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12373 do { \
12374 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12375 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12376 (void)RT_CONCAT(OP_,a_Upper); \
12377 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12378 (void)(a_fDisHints); \
12379 (void)(a_fIemHints); \
12380 } while (0)
12381
12382# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12383 do { \
12384 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12385 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12386 (void)RT_CONCAT(OP_,a_Upper); \
12387 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12388 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12389 (void)(a_fDisHints); \
12390 (void)(a_fIemHints); \
12391 } while (0)
12392
12393# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12394 do { \
12395 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12396 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12397 (void)RT_CONCAT(OP_,a_Upper); \
12398 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12399 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12400 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12401 (void)(a_fDisHints); \
12402 (void)(a_fIemHints); \
12403 } while (0)
12404
12405# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12406 do { \
12407 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12408 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12409 (void)RT_CONCAT(OP_,a_Upper); \
12410 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12411 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12412 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12413 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12414 (void)(a_fDisHints); \
12415 (void)(a_fIemHints); \
12416 } while (0)
12417
12418#else
12419# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12420
12421# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12422 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12423# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12424 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12425# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12426 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12427# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12428 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12429# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12430 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12431
12432#endif
12433
12434#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12435 IEMOP_MNEMONIC0EX(a_Lower, \
12436 #a_Lower, \
12437 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12438#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12439 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12440 #a_Lower " " #a_Op1, \
12441 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12442#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12443 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12444 #a_Lower " " #a_Op1 "," #a_Op2, \
12445 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12446#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12447 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12448 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12449 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12450#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12451 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12452 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12453 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
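
/* Illustrative sketch: a decoder typically opens with one of the wrappers
 * above; the operand forms and the DISOPTYPE_HARMLESS hint below are the ones
 * assumed for a classic "add Eb,Gb" encoding, with the IEM hints left at zero:
 *
 *     IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0);
 */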
12454
12455/** @} */
12456
12457
12458/** @name Opcode Helpers.
12459 * @{
12460 */
12461
12462#ifdef IN_RING3
12463# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12464 do { \
12465 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12466 else \
12467 { \
12468 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12469 return IEMOP_RAISE_INVALID_OPCODE(); \
12470 } \
12471 } while (0)
12472#else
12473# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12474 do { \
12475 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12476 else return IEMOP_RAISE_INVALID_OPCODE(); \
12477 } while (0)
12478#endif
12479
12480/** The instruction requires a 186 or later. */
12481#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12482# define IEMOP_HLP_MIN_186() do { } while (0)
12483#else
12484# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12485#endif
12486
12487/** The instruction requires a 286 or later. */
12488#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12489# define IEMOP_HLP_MIN_286() do { } while (0)
12490#else
12491# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12492#endif
12493
12494/** The instruction requires a 386 or later. */
12495#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12496# define IEMOP_HLP_MIN_386() do { } while (0)
12497#else
12498# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12499#endif
12500
12501/** The instruction requires a 386 or later if the given expression is true. */
12502#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12503# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12504#else
12505# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12506#endif
12507
12508/** The instruction requires a 486 or later. */
12509#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12510# define IEMOP_HLP_MIN_486() do { } while (0)
12511#else
12512# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12513#endif
12514
12515/** The instruction requires a Pentium (586) or later. */
12516#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12517# define IEMOP_HLP_MIN_586() do { } while (0)
12518#else
12519# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12520#endif
12521
12522/** The instruction requires a PentiumPro (686) or later. */
12523#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12524# define IEMOP_HLP_MIN_686() do { } while (0)
12525#else
12526# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12527#endif
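
/* Illustrative sketch: an instruction introduced with the 486 would place the
 * corresponding minimum-CPU check right after its mnemonic macro; the XADD
 * mnemonic here is just an example:
 *
 *     IEMOP_MNEMONIC(xadd_Eb_Gb, "xadd Eb,Gb");
 *     IEMOP_HLP_MIN_486();
 */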
12528
12529
12530/** The instruction raises an \#UD in real and V8086 mode. */
12531#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12532 do \
12533 { \
12534 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12535 else return IEMOP_RAISE_INVALID_OPCODE(); \
12536 } while (0)
12537
12538#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12539/** This instruction raises an \#UD in real and V8086 mode or when not using a
12540 * 64-bit code segment while in long mode (applicable to all VMX instructions
12541 * except VMCALL).
12542 *
12543 * @note Update IEM_VMX_INSTR_CHECKS() if changes are made here.
12544 */
12545#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12546 do \
12547 { \
12548 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12549 && ( !IEM_IS_LONG_MODE(pVCpu) \
12550 || IEM_IS_64BIT_CODE(pVCpu))) \
12551 { /* likely */ } \
12552 else \
12553 { \
12554 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12555 { \
12556 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12557 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12558 return IEMOP_RAISE_INVALID_OPCODE(); \
12559 } \
12560 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12561 { \
12562 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12563 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12564 return IEMOP_RAISE_INVALID_OPCODE(); \
12565 } \
12566 } \
12567 } while (0)
12568
12569/** The instruction can only be executed in VMX operation (VMX root mode and
12570 * non-root mode).
12571 *
12572 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12573 */
12574# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12575 do \
12576 { \
12577 if (IEM_IS_VMX_ROOT_MODE(pVCpu)) { /* likely */ } \
12578 else \
12579 { \
12580 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12581 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12582 return IEMOP_RAISE_INVALID_OPCODE(); \
12583 } \
12584 } while (0)
12585#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12586
12587/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12588 * 64-bit mode. */
12589#define IEMOP_HLP_NO_64BIT() \
12590 do \
12591 { \
12592 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12593 return IEMOP_RAISE_INVALID_OPCODE(); \
12594 } while (0)
12595
12596/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12597 * 64-bit mode. */
12598#define IEMOP_HLP_ONLY_64BIT() \
12599 do \
12600 { \
12601 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12602 return IEMOP_RAISE_INVALID_OPCODE(); \
12603 } while (0)
12604
12605/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12606#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12607 do \
12608 { \
12609 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12610 iemRecalEffOpSize64Default(pVCpu); \
12611 } while (0)
12612
12613/** The instruction has 64-bit operand size if 64-bit mode. */
12614#define IEMOP_HLP_64BIT_OP_SIZE() \
12615 do \
12616 { \
12617 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12618 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12619 } while (0)
12620
12621/** Only a REX prefix immediately preceding the first opcode byte takes
12622 * effect. This macro helps ensure this and also logs bad guest code. */
12623#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12624 do \
12625 { \
12626 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12627 { \
12628 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12629 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12630 pVCpu->iem.s.uRexB = 0; \
12631 pVCpu->iem.s.uRexIndex = 0; \
12632 pVCpu->iem.s.uRexReg = 0; \
12633 iemRecalEffOpSize(pVCpu); \
12634 } \
12635 } while (0)
12636
12637/**
12638 * Done decoding.
12639 */
12640#define IEMOP_HLP_DONE_DECODING() \
12641 do \
12642 { \
12643 /*nothing for now, maybe later... */ \
12644 } while (0)
12645
12646/**
12647 * Done decoding, raise \#UD exception if lock prefix present.
12648 */
12649#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12650 do \
12651 { \
12652 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12653 { /* likely */ } \
12654 else \
12655 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12656 } while (0)
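
/* Illustrative sketch: the simplest decoders call this immediately after
 * consuming their opcode bytes and before opening the IEM_MC block, e.g. a
 * NOP-shaped instruction:
 *
 *     IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *     IEM_MC_BEGIN(0, 0);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */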
12657
12658
12659/**
12660 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12661 * repnz or size prefixes are present, or if in real or v8086 mode.
12662 */
12663#define IEMOP_HLP_DONE_VEX_DECODING() \
12664 do \
12665 { \
12666 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12667 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12668 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12669 { /* likely */ } \
12670 else \
12671 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12672 } while (0)
12673
12674/**
12675 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12676 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12677 */
12678#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12679 do \
12680 { \
12681 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12682 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12683 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12684 && pVCpu->iem.s.uVexLength == 0)) \
12685 { /* likely */ } \
12686 else \
12687 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12688 } while (0)
12689
12690
12691/**
12692 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12693 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12694 * register 0, or if in real or v8086 mode.
12695 */
12696#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12697 do \
12698 { \
12699 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12700 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12701 && !pVCpu->iem.s.uVex3rdReg \
12702 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12703 { /* likely */ } \
12704 else \
12705 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12706 } while (0)
12707
12708/**
12709 * Done decoding VEX, no V, L=0.
12710 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12711 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12712 */
12713#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12714 do \
12715 { \
12716 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12717 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12718 && pVCpu->iem.s.uVexLength == 0 \
12719 && pVCpu->iem.s.uVex3rdReg == 0 \
12720 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12721 { /* likely */ } \
12722 else \
12723 return IEMOP_RAISE_INVALID_OPCODE(); \
12724 } while (0)
12725
12726#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12727 do \
12728 { \
12729 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12730 { /* likely */ } \
12731 else \
12732 { \
12733 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12734 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12735 } \
12736 } while (0)
12737#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12738 do \
12739 { \
12740 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12741 { /* likely */ } \
12742 else \
12743 { \
12744 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12745 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12746 } \
12747 } while (0)
12748
12749/**
12750 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12751 * are present.
12752 */
12753#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12754 do \
12755 { \
12756 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12757 { /* likely */ } \
12758 else \
12759 return IEMOP_RAISE_INVALID_OPCODE(); \
12760 } while (0)
12761
12762/**
12763 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12764 * prefixes are present.
12765 */
12766#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12767 do \
12768 { \
12769 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12770 { /* likely */ } \
12771 else \
12772 return IEMOP_RAISE_INVALID_OPCODE(); \
12773 } while (0)
12774
12775
12776/**
12777 * Calculates the effective address of a ModR/M memory operand.
12778 *
12779 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12780 *
12781 * @return Strict VBox status code.
12782 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12783 * @param bRm The ModRM byte.
12784 * @param cbImm The size of any immediate following the
12785 * effective address opcode bytes. Important for
12786 * RIP relative addressing.
12787 * @param pGCPtrEff Where to return the effective address.
12788 */
12789IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12790{
12791 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12792# define SET_SS_DEF() \
12793 do \
12794 { \
12795 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12796 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12797 } while (0)
12798
12799 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12800 {
12801/** @todo Check the effective address size crap! */
12802 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12803 {
12804 uint16_t u16EffAddr;
12805
12806 /* Handle the disp16 form with no registers first. */
12807 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12808 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12809 else
12810 {
12811                /* Get the displacement. */
12812 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12813 {
12814 case 0: u16EffAddr = 0; break;
12815 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12816 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12817 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12818 }
12819
12820 /* Add the base and index registers to the disp. */
12821 switch (bRm & X86_MODRM_RM_MASK)
12822 {
12823 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12824 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12825 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12826 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12827 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12828 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12829 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12830 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12831 }
12832 }
12833
12834 *pGCPtrEff = u16EffAddr;
12835 }
12836 else
12837 {
12838 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12839 uint32_t u32EffAddr;
12840
12841 /* Handle the disp32 form with no registers first. */
12842 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12843 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12844 else
12845 {
12846 /* Get the register (or SIB) value. */
12847 switch ((bRm & X86_MODRM_RM_MASK))
12848 {
12849 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12850 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12851 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12852 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12853 case 4: /* SIB */
12854 {
12855 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12856
12857 /* Get the index and scale it. */
12858 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12859 {
12860 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12861 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12862 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12863 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12864 case 4: u32EffAddr = 0; /*none */ break;
12865 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12866 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12867 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12868 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12869 }
12870 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12871
12872 /* add base */
12873 switch (bSib & X86_SIB_BASE_MASK)
12874 {
12875 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12876 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12877 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12878 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12879 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12880 case 5:
12881 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12882 {
12883 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12884 SET_SS_DEF();
12885 }
12886 else
12887 {
12888 uint32_t u32Disp;
12889 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12890 u32EffAddr += u32Disp;
12891 }
12892 break;
12893 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12894 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12895 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12896 }
12897 break;
12898 }
12899 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12900 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12901 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12902 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12903 }
12904
12905 /* Get and add the displacement. */
12906 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12907 {
12908 case 0:
12909 break;
12910 case 1:
12911 {
12912 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12913 u32EffAddr += i8Disp;
12914 break;
12915 }
12916 case 2:
12917 {
12918 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12919 u32EffAddr += u32Disp;
12920 break;
12921 }
12922 default:
12923 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12924 }
12925
12926 }
12927 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12928 *pGCPtrEff = u32EffAddr;
12929 else
12930 {
12931 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12932 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12933 }
12934 }
12935 }
12936 else
12937 {
12938 uint64_t u64EffAddr;
12939
12940 /* Handle the rip+disp32 form with no registers first. */
12941 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12942 {
12943 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12944 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12945 }
12946 else
12947 {
12948 /* Get the register (or SIB) value. */
12949 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
12950 {
12951 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12952 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12953 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12954 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12955 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
12956 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12957 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12958 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12959 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12960 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12961 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12962 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12963 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12964 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
12965 /* SIB */
12966 case 4:
12967 case 12:
12968 {
12969 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12970
12971 /* Get the index and scale it. */
12972 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
12973 {
12974 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
12975 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
12976 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
12977 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
12978 case 4: u64EffAddr = 0; /*none */ break;
12979 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
12980 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
12981 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
12982 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
12983 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
12984 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
12985 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
12986 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
12987 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
12988 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
12989 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
12990 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12991 }
12992 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12993
12994 /* add base */
12995 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
12996 {
12997 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
12998 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
12999 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13000 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13001 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13002 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13003 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13004 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13005 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13006 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13007 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13008 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13009 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13010 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13011 /* complicated encodings */
13012 case 5:
13013 case 13:
13014 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13015 {
13016 if (!pVCpu->iem.s.uRexB)
13017 {
13018 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13019 SET_SS_DEF();
13020 }
13021 else
13022 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13023 }
13024 else
13025 {
13026 uint32_t u32Disp;
13027 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13028 u64EffAddr += (int32_t)u32Disp;
13029 }
13030 break;
13031 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13032 }
13033 break;
13034 }
13035 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13036 }
13037
13038 /* Get and add the displacement. */
13039 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13040 {
13041 case 0:
13042 break;
13043 case 1:
13044 {
13045 int8_t i8Disp;
13046 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13047 u64EffAddr += i8Disp;
13048 break;
13049 }
13050 case 2:
13051 {
13052 uint32_t u32Disp;
13053 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13054 u64EffAddr += (int32_t)u32Disp;
13055 break;
13056 }
13057 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13058 }
13059
13060 }
13061
13062 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13063 *pGCPtrEff = u64EffAddr;
13064 else
13065 {
13066 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13067 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13068 }
13069 }
13070
13071 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13072 return VINF_SUCCESS;
13073}
13074
13075
13076/**
13077 * Calculates the effective address of a ModR/M memory operand.
13078 *
13079 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13080 *
13081 * @return Strict VBox status code.
13082 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13083 * @param bRm The ModRM byte.
13084 * @param cbImm The size of any immediate following the
13085 * effective address opcode bytes. Important for
13086 * RIP relative addressing.
13087 * @param pGCPtrEff Where to return the effective address.
13088 * @param offRsp RSP displacement.
13089 */
13090IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13091{
13092    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13093# define SET_SS_DEF() \
13094 do \
13095 { \
13096 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13097 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13098 } while (0)
13099
13100 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13101 {
13102/** @todo Check the effective address size crap! */
13103 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13104 {
13105 uint16_t u16EffAddr;
13106
13107 /* Handle the disp16 form with no registers first. */
13108 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13109 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13110 else
13111 {
13112                /* Get the displacement. */
13113 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13114 {
13115 case 0: u16EffAddr = 0; break;
13116 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13117 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13118 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13119 }
13120
13121 /* Add the base and index registers to the disp. */
13122 switch (bRm & X86_MODRM_RM_MASK)
13123 {
13124 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13125 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13126 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13127 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13128 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13129 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13130 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13131 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13132 }
13133 }
13134
13135 *pGCPtrEff = u16EffAddr;
13136 }
13137 else
13138 {
13139 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13140 uint32_t u32EffAddr;
13141
13142 /* Handle the disp32 form with no registers first. */
13143 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13144 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13145 else
13146 {
13147 /* Get the register (or SIB) value. */
13148 switch ((bRm & X86_MODRM_RM_MASK))
13149 {
13150 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13151 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13152 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13153 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13154 case 4: /* SIB */
13155 {
13156 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13157
13158 /* Get the index and scale it. */
13159 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13160 {
13161 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13162 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13163 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13164 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13165 case 4: u32EffAddr = 0; /*none */ break;
13166 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13167 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13168 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13169 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13170 }
13171 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13172
13173 /* add base */
13174 switch (bSib & X86_SIB_BASE_MASK)
13175 {
13176 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13177 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13178 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13179 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13180 case 4:
13181 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13182 SET_SS_DEF();
13183 break;
13184 case 5:
13185 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13186 {
13187 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13188 SET_SS_DEF();
13189 }
13190 else
13191 {
13192 uint32_t u32Disp;
13193 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13194 u32EffAddr += u32Disp;
13195 }
13196 break;
13197 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13198 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13199 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13200 }
13201 break;
13202 }
13203 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13204 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13205 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13206 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13207 }
13208
13209 /* Get and add the displacement. */
13210 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13211 {
13212 case 0:
13213 break;
13214 case 1:
13215 {
13216 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13217 u32EffAddr += i8Disp;
13218 break;
13219 }
13220 case 2:
13221 {
13222 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13223 u32EffAddr += u32Disp;
13224 break;
13225 }
13226 default:
13227 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13228 }
13229
13230 }
13231 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13232 *pGCPtrEff = u32EffAddr;
13233 else
13234 {
13235 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13236 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13237 }
13238 }
13239 }
13240 else
13241 {
13242 uint64_t u64EffAddr;
13243
13244 /* Handle the rip+disp32 form with no registers first. */
13245 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13246 {
13247 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13248 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13249 }
13250 else
13251 {
13252 /* Get the register (or SIB) value. */
13253 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13254 {
13255 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13256 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13257 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13258 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13259 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13260 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13261 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13262 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13263 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13264 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13265 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13266 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13267 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13268 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13269 /* SIB */
13270 case 4:
13271 case 12:
13272 {
13273 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13274
13275 /* Get the index and scale it. */
13276 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13277 {
13278 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13279 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13280 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13281 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13282 case 4: u64EffAddr = 0; /*none */ break;
13283 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13284 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13285 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13286 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13287 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13288 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13289 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13290 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13291 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13292 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13293 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13294 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13295 }
13296 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13297
13298 /* add base */
13299 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13300 {
13301 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13302 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13303 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13304 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13305 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13306 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13307 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13308 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13309 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13310 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13311 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13312 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13313 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13314 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13315 /* complicated encodings */
13316 case 5:
13317 case 13:
13318 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13319 {
13320 if (!pVCpu->iem.s.uRexB)
13321 {
13322 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13323 SET_SS_DEF();
13324 }
13325 else
13326 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13327 }
13328 else
13329 {
13330 uint32_t u32Disp;
13331 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13332 u64EffAddr += (int32_t)u32Disp;
13333 }
13334 break;
13335 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13336 }
13337 break;
13338 }
13339 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13340 }
13341
13342 /* Get and add the displacement. */
13343 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13344 {
13345 case 0:
13346 break;
13347 case 1:
13348 {
13349 int8_t i8Disp;
13350 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13351 u64EffAddr += i8Disp;
13352 break;
13353 }
13354 case 2:
13355 {
13356 uint32_t u32Disp;
13357 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13358 u64EffAddr += (int32_t)u32Disp;
13359 break;
13360 }
13361 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13362 }
13363
13364 }
13365
13366 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13367 *pGCPtrEff = u64EffAddr;
13368 else
13369 {
13370 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13371 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13372 }
13373 }
13374
13375 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13376 return VINF_SUCCESS;
13377}
13378
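/* Illustrative sketch (not part of the interpreter): how the rip+disp32 case in
 * iemOpHlpCalcRmEffAddrEx composes the effective address.  RIP-relative operands
 * are relative to the *next* instruction, so the size of any immediate that has
 * not been fetched yet (cbImm) must be added on top of the opcode bytes decoded
 * so far.  The helper name and parameters below are made up for illustration and
 * are not an IEM API. */
#if 0
static uint64_t iemSketchRipRelEffAddr(uint64_t uRip, uint8_t cbDecodedSoFar, uint8_t cbImm, int32_t i32Disp)
{
    /* EA = RIP of the next instruction + sign-extended disp32. */
    return uRip + cbDecodedSoFar + cbImm + (uint64_t)(int64_t)i32Disp;
}
#endif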
13379
13380#ifdef IEM_WITH_SETJMP
13381/**
13382 * Calculates the effective address of a ModR/M memory operand.
13383 *
13384 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13385 *
13386 * May longjmp on internal error.
13387 *
13388 * @return The effective address.
13389 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13390 * @param bRm The ModRM byte.
13391 * @param cbImm The size of any immediate following the
13392 * effective address opcode bytes. Important for
13393 * RIP relative addressing.
13394 */
13395IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13396{
13397 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13398# define SET_SS_DEF() \
13399 do \
13400 { \
13401 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13402 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13403 } while (0)
13404
13405 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13406 {
13407/** @todo Check the effective address size crap! */
13408 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13409 {
13410 uint16_t u16EffAddr;
13411
13412 /* Handle the disp16 form with no registers first. */
13413 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13414 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13415 else
13416 {
13417            /* Get the displacement. */
13418 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13419 {
13420 case 0: u16EffAddr = 0; break;
13421 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13422 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13423 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13424 }
13425
13426 /* Add the base and index registers to the disp. */
13427 switch (bRm & X86_MODRM_RM_MASK)
13428 {
13429 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13430 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13431 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13432 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13433 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13434 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13435 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13436 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13437 }
13438 }
13439
13440 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13441 return u16EffAddr;
13442 }
13443
13444 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13445 uint32_t u32EffAddr;
13446
13447 /* Handle the disp32 form with no registers first. */
13448 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13449 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13450 else
13451 {
13452 /* Get the register (or SIB) value. */
13453 switch ((bRm & X86_MODRM_RM_MASK))
13454 {
13455 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13456 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13457 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13458 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13459 case 4: /* SIB */
13460 {
13461 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13462
13463 /* Get the index and scale it. */
13464 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13465 {
13466 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13467 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13468 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13469 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13470 case 4: u32EffAddr = 0; /*none */ break;
13471 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13472 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13473 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13474 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13475 }
13476 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13477
13478 /* add base */
13479 switch (bSib & X86_SIB_BASE_MASK)
13480 {
13481 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13482 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13483 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13484 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13485 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13486 case 5:
13487 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13488 {
13489 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13490 SET_SS_DEF();
13491 }
13492 else
13493 {
13494 uint32_t u32Disp;
13495 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13496 u32EffAddr += u32Disp;
13497 }
13498 break;
13499 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13500 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13501 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13502 }
13503 break;
13504 }
13505 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13506 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13507 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13508 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13509 }
13510
13511 /* Get and add the displacement. */
13512 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13513 {
13514 case 0:
13515 break;
13516 case 1:
13517 {
13518 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13519 u32EffAddr += i8Disp;
13520 break;
13521 }
13522 case 2:
13523 {
13524 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13525 u32EffAddr += u32Disp;
13526 break;
13527 }
13528 default:
13529 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13530 }
13531 }
13532
13533 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13534 {
13535 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13536 return u32EffAddr;
13537 }
13538 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13539 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13540 return u32EffAddr & UINT16_MAX;
13541 }
13542
13543 uint64_t u64EffAddr;
13544
13545 /* Handle the rip+disp32 form with no registers first. */
13546 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13547 {
13548 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13549 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13550 }
13551 else
13552 {
13553 /* Get the register (or SIB) value. */
13554 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13555 {
13556 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13557 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13558 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13559 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13560 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13561 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13562 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13563 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13564 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13565 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13566 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13567 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13568 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13569 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13570 /* SIB */
13571 case 4:
13572 case 12:
13573 {
13574 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13575
13576 /* Get the index and scale it. */
13577 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13578 {
13579 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13580 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13581 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13582 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13583 case 4: u64EffAddr = 0; /*none */ break;
13584 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13585 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13586 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13587 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13588 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13589 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13590 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13591 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13592 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13593 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13594 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13595 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13596 }
13597 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13598
13599 /* add base */
13600 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13601 {
13602 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13603 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13604 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13605 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13606 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13607 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13608 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13609 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13610 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13611 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13612 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13613 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13614 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13615 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13616 /* complicated encodings */
13617 case 5:
13618 case 13:
13619 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13620 {
13621 if (!pVCpu->iem.s.uRexB)
13622 {
13623 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13624 SET_SS_DEF();
13625 }
13626 else
13627 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13628 }
13629 else
13630 {
13631 uint32_t u32Disp;
13632 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13633 u64EffAddr += (int32_t)u32Disp;
13634 }
13635 break;
13636 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13637 }
13638 break;
13639 }
13640 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13641 }
13642
13643 /* Get and add the displacement. */
13644 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13645 {
13646 case 0:
13647 break;
13648 case 1:
13649 {
13650 int8_t i8Disp;
13651 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13652 u64EffAddr += i8Disp;
13653 break;
13654 }
13655 case 2:
13656 {
13657 uint32_t u32Disp;
13658 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13659 u64EffAddr += (int32_t)u32Disp;
13660 break;
13661 }
13662 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13663 }
13664
13665 }
13666
13667 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13668 {
13669 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13670 return u64EffAddr;
13671 }
13672 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13673 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13674 return u64EffAddr & UINT32_MAX;
13675}
13676#endif /* IEM_WITH_SETJMP */
13677
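/* Illustrative sketch (not part of the interpreter): how the SIB scale field is
 * applied in the helpers above.  The index register's contribution is shifted
 * left by the 2-bit scale, i.e. multiplied by 1, 2, 4 or 8.  The helper name and
 * parameters are made up for illustration. */
#if 0
static uint64_t iemSketchScaleSibIndex(uint64_t uIndexReg, uint8_t bSib)
{
    return uIndexReg << ((bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK);
}
#endif
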
13678/** @} */
13679
13680
13681
13682/*
13683 * Include the instructions
13684 */
13685#include "IEMAllInstructions.cpp.h"
13686
13687
13688
13689#ifdef LOG_ENABLED
13690/**
13691 * Logs the current instruction.
13692 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13693 * @param fSameCtx Set if we have the same context information as the VMM,
13694 * clear if we may have already executed an instruction in
13695 * our debug context. When clear, we assume IEMCPU holds
13696 * valid CPU mode info.
13697 *
13698 * The @a fSameCtx parameter is now misleading and obsolete.
13699 * @param pszFunction The IEM function doing the execution.
13700 */
13701IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13702{
13703# ifdef IN_RING3
13704 if (LogIs2Enabled())
13705 {
13706 char szInstr[256];
13707 uint32_t cbInstr = 0;
13708 if (fSameCtx)
13709 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13710 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13711 szInstr, sizeof(szInstr), &cbInstr);
13712 else
13713 {
13714 uint32_t fFlags = 0;
13715 switch (pVCpu->iem.s.enmCpuMode)
13716 {
13717 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13718 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13719 case IEMMODE_16BIT:
13720 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13721 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13722 else
13723 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13724 break;
13725 }
13726 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13727 szInstr, sizeof(szInstr), &cbInstr);
13728 }
13729
13730 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13731 Log2(("**** %s\n"
13732 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13733 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13734 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13735 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13736 " %s\n"
13737 , pszFunction,
13738 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13739 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13740 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13741 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13742 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13743 szInstr));
13744
13745 if (LogIs3Enabled())
13746 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13747 }
13748 else
13749# endif
13750 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13751 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13752 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13753}
13754#endif /* LOG_ENABLED */
13755
13756
13757/**
13758 * Makes status code adjustments (pass up from I/O and access handler)
13759 * as well as maintaining statistics.
13760 *
13761 * @returns Strict VBox status code to pass up.
13762 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13763 * @param rcStrict The status from executing an instruction.
13764 */
13765DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13766{
13767 if (rcStrict != VINF_SUCCESS)
13768 {
13769 if (RT_SUCCESS(rcStrict))
13770 {
13771 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13772 || rcStrict == VINF_IOM_R3_IOPORT_READ
13773 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13774 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13775 || rcStrict == VINF_IOM_R3_MMIO_READ
13776 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13777 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13778 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13779 || rcStrict == VINF_CPUM_R3_MSR_READ
13780 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13781 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13782 || rcStrict == VINF_EM_RAW_TO_R3
13783 || rcStrict == VINF_EM_TRIPLE_FAULT
13784 || rcStrict == VINF_GIM_R3_HYPERCALL
13785 /* raw-mode / virt handlers only: */
13786 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13787 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13788 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13789 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13790 || rcStrict == VINF_SELM_SYNC_GDT
13791 || rcStrict == VINF_CSAM_PENDING_ACTION
13792 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13793 /* nested hw.virt codes: */
13794 || rcStrict == VINF_SVM_VMEXIT
13795 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13796/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13797 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13798#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13799 if ( rcStrict == VINF_SVM_VMEXIT
13800 && rcPassUp == VINF_SUCCESS)
13801 rcStrict = VINF_SUCCESS;
13802 else
13803#endif
13804 if (rcPassUp == VINF_SUCCESS)
13805 pVCpu->iem.s.cRetInfStatuses++;
13806 else if ( rcPassUp < VINF_EM_FIRST
13807 || rcPassUp > VINF_EM_LAST
13808 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13809 {
13810 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13811 pVCpu->iem.s.cRetPassUpStatus++;
13812 rcStrict = rcPassUp;
13813 }
13814 else
13815 {
13816 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13817 pVCpu->iem.s.cRetInfStatuses++;
13818 }
13819 }
13820 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13821 pVCpu->iem.s.cRetAspectNotImplemented++;
13822 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13823 pVCpu->iem.s.cRetInstrNotImplemented++;
13824 else
13825 pVCpu->iem.s.cRetErrStatuses++;
13826 }
13827 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13828 {
13829 pVCpu->iem.s.cRetPassUpStatus++;
13830 rcStrict = pVCpu->iem.s.rcPassUp;
13831 }
13832
13833 return rcStrict;
13834}
13835
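/* Illustrative sketch (not part of IEM): the pass-up decision used above, taken
 * in isolation.  A pending rcPassUp replaces an informational rcStrict when it
 * lies outside the VINF_EM_FIRST..VINF_EM_LAST band or is a lower (i.e. higher
 * priority) EM status than rcStrict.  The helper name is made up. */
#if 0
static bool iemSketchPassUpWins(int32_t rcPassUp, int32_t rcStrict)
{
    return rcPassUp < VINF_EM_FIRST
        || rcPassUp > VINF_EM_LAST
        || rcPassUp < rcStrict;
}
#endif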
13836
13837/**
13838 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13839 * IEMExecOneWithPrefetchedByPC.
13840 *
13841 * Similar code is found in IEMExecLots.
13842 *
13843 * @return Strict VBox status code.
13844 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13845 * @param fExecuteInhibit If set, execute the instruction following CLI,
13846 * POP SS and MOV SS,GR.
13847 * @param pszFunction The calling function name.
13848 */
13849DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13850{
13851 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13852 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13853 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13854 RT_NOREF_PV(pszFunction);
13855
13856#ifdef IEM_WITH_SETJMP
13857 VBOXSTRICTRC rcStrict;
13858 jmp_buf JmpBuf;
13859 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13860 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13861 if ((rcStrict = setjmp(JmpBuf)) == 0)
13862 {
13863 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13864 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13865 }
13866 else
13867 pVCpu->iem.s.cLongJumps++;
13868 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13869#else
13870 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13871 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13872#endif
13873 if (rcStrict == VINF_SUCCESS)
13874 pVCpu->iem.s.cInstructions++;
13875 if (pVCpu->iem.s.cActiveMappings > 0)
13876 {
13877 Assert(rcStrict != VINF_SUCCESS);
13878 iemMemRollback(pVCpu);
13879 }
13880 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13881 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13882 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13883
13884//#ifdef DEBUG
13885// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13886//#endif
13887
13888 /* Execute the next instruction as well if a cli, pop ss or
13889 mov ss, Gr has just completed successfully. */
13890 if ( fExecuteInhibit
13891 && rcStrict == VINF_SUCCESS
13892 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13893 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
13894 {
13895 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13896 if (rcStrict == VINF_SUCCESS)
13897 {
13898#ifdef LOG_ENABLED
13899 iemLogCurInstr(pVCpu, false, pszFunction);
13900#endif
13901#ifdef IEM_WITH_SETJMP
13902 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13903 if ((rcStrict = setjmp(JmpBuf)) == 0)
13904 {
13905 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13906 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13907 }
13908 else
13909 pVCpu->iem.s.cLongJumps++;
13910 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13911#else
13912 IEM_OPCODE_GET_NEXT_U8(&b);
13913 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13914#endif
13915 if (rcStrict == VINF_SUCCESS)
13916 pVCpu->iem.s.cInstructions++;
13917 if (pVCpu->iem.s.cActiveMappings > 0)
13918 {
13919 Assert(rcStrict != VINF_SUCCESS);
13920 iemMemRollback(pVCpu);
13921 }
13922 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13923 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13924 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13925 }
13926 else if (pVCpu->iem.s.cActiveMappings > 0)
13927 iemMemRollback(pVCpu);
13928 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
13929 }
13930
13931 /*
13932 * Return value fiddling, statistics and sanity assertions.
13933 */
13934 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
13935
13936 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
13937 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
13938 return rcStrict;
13939}
13940
13941
13942#ifdef IN_RC
13943/**
13944 * Re-enters raw-mode or ensures we return to ring-3.
13945 *
13946 * @returns rcStrict, maybe modified.
13947 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13948 * @param   rcStrict    The status code returned by the interpreter.
13949 */
13950DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13951{
13952 if ( !pVCpu->iem.s.fInPatchCode
13953 && ( rcStrict == VINF_SUCCESS
13954 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
13955 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
13956 {
13957 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
13958 CPUMRawEnter(pVCpu);
13959 else
13960 {
13961 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
13962 rcStrict = VINF_EM_RESCHEDULE;
13963 }
13964 }
13965 return rcStrict;
13966}
13967#endif
13968
13969
13970/**
13971 * Execute one instruction.
13972 *
13973 * @return Strict VBox status code.
13974 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13975 */
13976VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
13977{
13978#ifdef LOG_ENABLED
13979 iemLogCurInstr(pVCpu, true, "IEMExecOne");
13980#endif
13981
13982 /*
13983 * Do the decoding and emulation.
13984 */
13985 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
13986 if (rcStrict == VINF_SUCCESS)
13987 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
13988 else if (pVCpu->iem.s.cActiveMappings > 0)
13989 iemMemRollback(pVCpu);
13990
13991#ifdef IN_RC
13992 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
13993#endif
13994 if (rcStrict != VINF_SUCCESS)
13995 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
13996 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
13997 return rcStrict;
13998}
13999
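/* Illustrative sketch (not part of IEM): a hypothetical caller driving IEMExecOne
 * in a simple loop.  Real callers (EM and friends) add force-flag and scheduling
 * checks between instructions; the helper name and limit are made up. */
#if 0
static VBOXSTRICTRC iemSketchRunSomeInstructions(PVMCPU pVCpu, uint32_t cMaxInstr)
{
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;
    while (cMaxInstr-- > 0 && rcStrict == VINF_SUCCESS)
        rcStrict = IEMExecOne(pVCpu);
    return rcStrict;
}
#endif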
14000
14001VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14002{
14003 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14004
14005 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14006 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14007 if (rcStrict == VINF_SUCCESS)
14008 {
14009 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14010 if (pcbWritten)
14011 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14012 }
14013 else if (pVCpu->iem.s.cActiveMappings > 0)
14014 iemMemRollback(pVCpu);
14015
14016#ifdef IN_RC
14017 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14018#endif
14019 return rcStrict;
14020}
14021
14022
14023VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14024 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14025{
14026 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14027
14028 VBOXSTRICTRC rcStrict;
14029 if ( cbOpcodeBytes
14030 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14031 {
14032 iemInitDecoder(pVCpu, false);
14033#ifdef IEM_WITH_CODE_TLB
14034 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14035 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14036 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14037 pVCpu->iem.s.offCurInstrStart = 0;
14038 pVCpu->iem.s.offInstrNextByte = 0;
14039#else
14040 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14041 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14042#endif
14043 rcStrict = VINF_SUCCESS;
14044 }
14045 else
14046 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14047 if (rcStrict == VINF_SUCCESS)
14048 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14049 else if (pVCpu->iem.s.cActiveMappings > 0)
14050 iemMemRollback(pVCpu);
14051
14052#ifdef IN_RC
14053 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14054#endif
14055 return rcStrict;
14056}
14057
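/* Illustrative sketch (not part of IEM): handing already-fetched opcode bytes to
 * IEMExecOneWithPrefetchedByPC.  Because OpcodeBytesPC matches the current RIP,
 * the supplied buffer is used instead of fetching from guest memory.  The NOP
 * byte and helper name are made up for illustration. */
#if 0
static VBOXSTRICTRC iemSketchExecPrefetchedNop(PVMCPU pVCpu)
{
    static uint8_t const s_abNop[] = { 0x90 }; /* NOP */
    return IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
                                        pVCpu->cpum.GstCtx.rip, s_abNop, sizeof(s_abNop));
}
#endif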
14058
14059VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14060{
14061 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14062
14063 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14064 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14065 if (rcStrict == VINF_SUCCESS)
14066 {
14067 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14068 if (pcbWritten)
14069 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14070 }
14071 else if (pVCpu->iem.s.cActiveMappings > 0)
14072 iemMemRollback(pVCpu);
14073
14074#ifdef IN_RC
14075 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14076#endif
14077 return rcStrict;
14078}
14079
14080
14081VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14082 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14083{
14084 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14085
14086 VBOXSTRICTRC rcStrict;
14087 if ( cbOpcodeBytes
14088 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14089 {
14090 iemInitDecoder(pVCpu, true);
14091#ifdef IEM_WITH_CODE_TLB
14092 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14093 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14094 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14095 pVCpu->iem.s.offCurInstrStart = 0;
14096 pVCpu->iem.s.offInstrNextByte = 0;
14097#else
14098 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14099 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14100#endif
14101 rcStrict = VINF_SUCCESS;
14102 }
14103 else
14104 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14105 if (rcStrict == VINF_SUCCESS)
14106 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14107 else if (pVCpu->iem.s.cActiveMappings > 0)
14108 iemMemRollback(pVCpu);
14109
14110#ifdef IN_RC
14111 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14112#endif
14113 return rcStrict;
14114}
14115
14116
14117/**
14118 * For debugging DISGetParamSize, may come in handy.
14119 *
14120 * @returns Strict VBox status code.
14121 * @param pVCpu The cross context virtual CPU structure of the
14122 * calling EMT.
14123 * @param pCtxCore The context core structure.
14124 * @param OpcodeBytesPC The PC of the opcode bytes.
14125 * @param   pvOpcodeBytes   Prefetched opcode bytes.
14126 * @param cbOpcodeBytes Number of prefetched bytes.
14127 * @param pcbWritten Where to return the number of bytes written.
14128 * Optional.
14129 */
14130VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14131 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14132 uint32_t *pcbWritten)
14133{
14134 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14135
14136 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14137 VBOXSTRICTRC rcStrict;
14138 if ( cbOpcodeBytes
14139 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14140 {
14141 iemInitDecoder(pVCpu, true);
14142#ifdef IEM_WITH_CODE_TLB
14143 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14144 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14145 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14146 pVCpu->iem.s.offCurInstrStart = 0;
14147 pVCpu->iem.s.offInstrNextByte = 0;
14148#else
14149 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14150 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14151#endif
14152 rcStrict = VINF_SUCCESS;
14153 }
14154 else
14155 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14156 if (rcStrict == VINF_SUCCESS)
14157 {
14158 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14159 if (pcbWritten)
14160 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14161 }
14162 else if (pVCpu->iem.s.cActiveMappings > 0)
14163 iemMemRollback(pVCpu);
14164
14165#ifdef IN_RC
14166 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14167#endif
14168 return rcStrict;
14169}
14170
14171
14172VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14173{
14174 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14175
14176 /*
14177 * See if there is an interrupt pending in TRPM, inject it if we can.
14178 */
14179 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14180#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14181 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif;
14182 if (fIntrEnabled)
14183 {
14184 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14185 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu));
14186 else
14187 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14188 }
14189#else
14190 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14191#endif
14192 if ( fIntrEnabled
14193 && TRPMHasTrap(pVCpu)
14194 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14195 {
14196 uint8_t u8TrapNo;
14197 TRPMEVENT enmType;
14198 RTGCUINT uErrCode;
14199 RTGCPTR uCr2;
14200 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14201 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14202 TRPMResetTrap(pVCpu);
14203 }
14204
14205 /*
14206 * Initial decoder init w/ prefetch, then setup setjmp.
14207 */
14208 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14209 if (rcStrict == VINF_SUCCESS)
14210 {
14211#ifdef IEM_WITH_SETJMP
14212 jmp_buf JmpBuf;
14213 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14214 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14215 pVCpu->iem.s.cActiveMappings = 0;
14216 if ((rcStrict = setjmp(JmpBuf)) == 0)
14217#endif
14218 {
14219 /*
14220 * The run loop. We limit ourselves to 4096 instructions right now.
14221 */
14222 PVM pVM = pVCpu->CTX_SUFF(pVM);
14223 uint32_t cInstr = 4096;
14224 for (;;)
14225 {
14226 /*
14227 * Log the state.
14228 */
14229#ifdef LOG_ENABLED
14230 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14231#endif
14232
14233 /*
14234 * Do the decoding and emulation.
14235 */
14236 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14237 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14238 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14239 {
14240 Assert(pVCpu->iem.s.cActiveMappings == 0);
14241 pVCpu->iem.s.cInstructions++;
14242 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14243 {
14244 uint32_t fCpu = pVCpu->fLocalForcedActions
14245 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14246 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14247 | VMCPU_FF_TLB_FLUSH
14248#ifdef VBOX_WITH_RAW_MODE
14249 | VMCPU_FF_TRPM_SYNC_IDT
14250 | VMCPU_FF_SELM_SYNC_TSS
14251 | VMCPU_FF_SELM_SYNC_GDT
14252 | VMCPU_FF_SELM_SYNC_LDT
14253#endif
14254 | VMCPU_FF_INHIBIT_INTERRUPTS
14255 | VMCPU_FF_BLOCK_NMIS
14256 | VMCPU_FF_UNHALT ));
14257
14258 if (RT_LIKELY( ( !fCpu
14259 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14260 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14261 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14262 {
14263 if (cInstr-- > 0)
14264 {
14265 Assert(pVCpu->iem.s.cActiveMappings == 0);
14266 iemReInitDecoder(pVCpu);
14267 continue;
14268 }
14269 }
14270 }
14271 Assert(pVCpu->iem.s.cActiveMappings == 0);
14272 }
14273 else if (pVCpu->iem.s.cActiveMappings > 0)
14274 iemMemRollback(pVCpu);
14275 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14276 break;
14277 }
14278 }
14279#ifdef IEM_WITH_SETJMP
14280 else
14281 {
14282 if (pVCpu->iem.s.cActiveMappings > 0)
14283 iemMemRollback(pVCpu);
14284 pVCpu->iem.s.cLongJumps++;
14285 }
14286 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14287#endif
14288
14289 /*
14290 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14291 */
14292 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14293 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14294 }
14295 else
14296 {
14297 if (pVCpu->iem.s.cActiveMappings > 0)
14298 iemMemRollback(pVCpu);
14299
14300#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14301 /*
14302 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14303 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14304 */
14305 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14306#endif
14307 }
14308
14309 /*
14310 * Maybe re-enter raw-mode and log.
14311 */
14312#ifdef IN_RC
14313 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14314#endif
14315 if (rcStrict != VINF_SUCCESS)
14316 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14317 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14318 if (pcInstructions)
14319 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14320 return rcStrict;
14321}
14322
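/* Illustrative sketch (not part of IEM): a hypothetical caller of IEMExecLots that
 * just reports how many instructions were retired.  The helper name is made up. */
#if 0
static VBOXSTRICTRC iemSketchExecLotsAndReport(PVMCPU pVCpu)
{
    uint32_t cInstructions = 0;
    VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
    LogFlow(("iemSketchExecLotsAndReport: %u instructions -> %Rrc\n",
             cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif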
14323
14324/**
14325 * Interface used by EMExecuteExec, does exit statistics and limits.
14326 *
14327 * @returns Strict VBox status code.
14328 * @param pVCpu The cross context virtual CPU structure.
14329 * @param fWillExit To be defined.
14330 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14331 * @param cMaxInstructions Maximum number of instructions to execute.
14332 * @param cMaxInstructionsWithoutExits
14333 * The max number of instructions without exits.
14334 * @param pStats Where to return statistics.
14335 */
14336VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14337 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14338{
14339 NOREF(fWillExit); /** @todo define flexible exit crits */
14340
14341 /*
14342 * Initialize return stats.
14343 */
14344 pStats->cInstructions = 0;
14345 pStats->cExits = 0;
14346 pStats->cMaxExitDistance = 0;
14347 pStats->cReserved = 0;
14348
14349 /*
14350 * Initial decoder init w/ prefetch, then setup setjmp.
14351 */
14352 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14353 if (rcStrict == VINF_SUCCESS)
14354 {
14355#ifdef IEM_WITH_SETJMP
14356 jmp_buf JmpBuf;
14357 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14358 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14359 pVCpu->iem.s.cActiveMappings = 0;
14360 if ((rcStrict = setjmp(JmpBuf)) == 0)
14361#endif
14362 {
14363#ifdef IN_RING0
14364 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14365#endif
14366 uint32_t cInstructionSinceLastExit = 0;
14367
14368 /*
14369             * The run loop.  The instruction limits here are supplied by the caller
14369             * via cMaxInstructions and cMaxInstructionsWithoutExits.
14370 */
14371 PVM pVM = pVCpu->CTX_SUFF(pVM);
14372 for (;;)
14373 {
14374 /*
14375 * Log the state.
14376 */
14377#ifdef LOG_ENABLED
14378 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14379#endif
14380
14381 /*
14382 * Do the decoding and emulation.
14383 */
14384 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14385
14386 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14387 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14388
14389 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14390 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14391 {
14392 pStats->cExits += 1;
14393 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14394 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14395 cInstructionSinceLastExit = 0;
14396 }
14397
14398 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14399 {
14400 Assert(pVCpu->iem.s.cActiveMappings == 0);
14401 pVCpu->iem.s.cInstructions++;
14402 pStats->cInstructions++;
14403 cInstructionSinceLastExit++;
14404 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14405 {
14406 uint32_t fCpu = pVCpu->fLocalForcedActions
14407 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14408 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14409 | VMCPU_FF_TLB_FLUSH
14410#ifdef VBOX_WITH_RAW_MODE
14411 | VMCPU_FF_TRPM_SYNC_IDT
14412 | VMCPU_FF_SELM_SYNC_TSS
14413 | VMCPU_FF_SELM_SYNC_GDT
14414 | VMCPU_FF_SELM_SYNC_LDT
14415#endif
14416 | VMCPU_FF_INHIBIT_INTERRUPTS
14417 | VMCPU_FF_BLOCK_NMIS
14418 | VMCPU_FF_UNHALT ));
14419
14420 if (RT_LIKELY( ( ( !fCpu
14421 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14422 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14423 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) )
14424 || pStats->cInstructions < cMinInstructions))
14425 {
14426 if (pStats->cInstructions < cMaxInstructions)
14427 {
14428 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14429 {
14430#ifdef IN_RING0
14431 if ( !fCheckPreemptionPending
14432 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14433#endif
14434 {
14435 Assert(pVCpu->iem.s.cActiveMappings == 0);
14436 iemReInitDecoder(pVCpu);
14437 continue;
14438 }
14439#ifdef IN_RING0
14440 rcStrict = VINF_EM_RAW_INTERRUPT;
14441 break;
14442#endif
14443 }
14444 }
14445 }
14446 Assert(!(fCpu & VMCPU_FF_IEM));
14447 }
14448 Assert(pVCpu->iem.s.cActiveMappings == 0);
14449 }
14450 else if (pVCpu->iem.s.cActiveMappings > 0)
14451 iemMemRollback(pVCpu);
14452 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14453 break;
14454 }
14455 }
14456#ifdef IEM_WITH_SETJMP
14457 else
14458 {
14459 if (pVCpu->iem.s.cActiveMappings > 0)
14460 iemMemRollback(pVCpu);
14461 pVCpu->iem.s.cLongJumps++;
14462 }
14463 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14464#endif
14465
14466 /*
14467 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14468 */
14469 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14470 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14471 }
14472 else
14473 {
14474 if (pVCpu->iem.s.cActiveMappings > 0)
14475 iemMemRollback(pVCpu);
14476
14477#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14478 /*
14479 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14480 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14481 */
14482 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14483#endif
14484 }
14485
14486 /*
14487 * Maybe re-enter raw-mode and log.
14488 */
14489#ifdef IN_RC
14490 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14491#endif
14492 if (rcStrict != VINF_SUCCESS)
14493 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14494 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14495 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14496 return rcStrict;
14497}
14498
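/* Illustrative sketch (not part of IEM): a hypothetical caller of IEMExecForExits.
 * The limits are arbitrary example values, and the statistics structure name
 * (IEMEXECFOREXITSTATS) is inferred from the PIEMEXECFOREXITSTATS parameter. */
#if 0
static VBOXSTRICTRC iemSketchExecForExits(PVMCPU pVCpu)
{
    IEMEXECFOREXITSTATS Stats;
    VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/, 16 /*cMinInstructions*/,
                                            1024 /*cMaxInstructions*/, 256 /*cMaxInstructionsWithoutExits*/,
                                            &Stats);
    LogFlow(("iemSketchExecForExits: %u instructions, %u exits, max distance %u -> %Rrc\n",
             Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif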
14499
14500/**
14501 * Injects a trap, fault, abort, software interrupt or external interrupt.
14502 *
14503 * The parameter list matches TRPMQueryTrapAll pretty closely.
14504 *
14505 * @returns Strict VBox status code.
14506 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14507 * @param u8TrapNo The trap number.
14508 * @param enmType What type is it (trap/fault/abort), software
14509 * interrupt or hardware interrupt.
14510 * @param uErrCode The error code if applicable.
14511 * @param uCr2 The CR2 value if applicable.
14512 * @param cbInstr The instruction length (only relevant for
14513 * software interrupts).
14514 */
14515VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14516 uint8_t cbInstr)
14517{
14518 iemInitDecoder(pVCpu, false);
14519#ifdef DBGFTRACE_ENABLED
14520 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14521 u8TrapNo, enmType, uErrCode, uCr2);
14522#endif
14523
14524 uint32_t fFlags;
14525 switch (enmType)
14526 {
14527 case TRPM_HARDWARE_INT:
14528 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14529 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14530 uErrCode = uCr2 = 0;
14531 break;
14532
14533 case TRPM_SOFTWARE_INT:
14534 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14535 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14536 uErrCode = uCr2 = 0;
14537 break;
14538
14539 case TRPM_TRAP:
14540 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14541 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14542 if (u8TrapNo == X86_XCPT_PF)
14543 fFlags |= IEM_XCPT_FLAGS_CR2;
14544 switch (u8TrapNo)
14545 {
14546 case X86_XCPT_DF:
14547 case X86_XCPT_TS:
14548 case X86_XCPT_NP:
14549 case X86_XCPT_SS:
14550 case X86_XCPT_PF:
14551 case X86_XCPT_AC:
14552 fFlags |= IEM_XCPT_FLAGS_ERR;
14553 break;
14554
14555 case X86_XCPT_NMI:
14556 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14557 break;
14558 }
14559 break;
14560
14561 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14562 }
14563
14564 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14565
14566 if (pVCpu->iem.s.cActiveMappings > 0)
14567 iemMemRollback(pVCpu);
14568
14569 return rcStrict;
14570}
14571
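/* Illustrative sketch (not part of IEM): injecting an external hardware interrupt
 * via IEMInjectTrap.  The vector is a made-up example; as the switch above shows,
 * hardware interrupts take neither an error code nor a CR2 value. */
#if 0
static VBOXSTRICTRC iemSketchInjectExtInt(PVMCPU pVCpu)
{
    return IEMInjectTrap(pVCpu, 0x20 /* example vector */, TRPM_HARDWARE_INT,
                         0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
}
#endif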
14572
14573/**
14574 * Injects the active TRPM event.
14575 *
14576 * @returns Strict VBox status code.
14577 * @param pVCpu The cross context virtual CPU structure.
14578 */
14579VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14580{
14581#ifndef IEM_IMPLEMENTS_TASKSWITCH
14582 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14583#else
14584 uint8_t u8TrapNo;
14585 TRPMEVENT enmType;
14586 RTGCUINT uErrCode;
14587 RTGCUINTPTR uCr2;
14588 uint8_t cbInstr;
14589 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14590 if (RT_FAILURE(rc))
14591 return rc;
14592
14593 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14594# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14595 if (rcStrict == VINF_SVM_VMEXIT)
14596 rcStrict = VINF_SUCCESS;
14597# endif
14598
14599 /** @todo Are there any other codes that imply the event was successfully
14600 * delivered to the guest? See @bugref{6607}. */
14601 if ( rcStrict == VINF_SUCCESS
14602 || rcStrict == VINF_IEM_RAISED_XCPT)
14603 TRPMResetTrap(pVCpu);
14604
14605 return rcStrict;
14606#endif
14607}
14608
14609
14610VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14611{
14612 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14613 return VERR_NOT_IMPLEMENTED;
14614}
14615
14616
14617VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14618{
14619 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14620 return VERR_NOT_IMPLEMENTED;
14621}
14622
14623
14624#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14625/**
14626 * Executes an IRET instruction with default operand size.
14627 *
14628 * This is for PATM.
14629 *
14630 * @returns VBox status code.
14631 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14632 * @param pCtxCore The register frame.
14633 */
14634VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14635{
14636 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14637
14638 iemCtxCoreToCtx(pCtx, pCtxCore);
14639 iemInitDecoder(pVCpu);
14640 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14641 if (rcStrict == VINF_SUCCESS)
14642 iemCtxToCtxCore(pCtxCore, pCtx);
14643 else
14644 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14645 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14646 return rcStrict;
14647}
14648#endif
14649
14650
14651/**
14652 * Macro used by the IEMExec* method to check the given instruction length.
14653 *
14654 * Will return on failure!
14655 *
14656 * @param a_cbInstr The given instruction length.
14657 * @param a_cbMin The minimum length.
14658 */
14659#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14660 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14661 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14662
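/* Worked example (illustrative): with a_cbMin = 1 the unsigned subtraction above
 * folds the range check 1 <= a_cbInstr <= 15 into a single compare:
 *   a_cbInstr = 0  ->  0 - 1 wraps to UINT_MAX  >  14 -> fails,
 *   a_cbInstr = 1  ->  0                        <= 14 -> passes,
 *   a_cbInstr = 15 ->  14                       <= 14 -> passes,
 *   a_cbInstr = 16 ->  15                       >  14 -> fails. */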
14663
14664/**
14665 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14666 *
14667 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14668 *
14669 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14670 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14671 * @param rcStrict The status code to fiddle.
14672 */
14673DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14674{
14675 iemUninitExec(pVCpu);
14676#ifdef IN_RC
14677 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14678#else
14679 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14680#endif
14681}
14682
14683
14684/**
14685 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14686 *
14687 * This API ASSUMES that the caller has already verified that the guest code is
14688 * allowed to access the I/O port. (The I/O port is in the DX register in the
14689 * guest state.)
14690 *
14691 * @returns Strict VBox status code.
14692 * @param pVCpu The cross context virtual CPU structure.
14693 * @param cbValue The size of the I/O port access (1, 2, or 4).
14694 * @param enmAddrMode The addressing mode.
14695 * @param fRepPrefix Indicates whether a repeat prefix is used
14696 * (doesn't matter which for this instruction).
14697 * @param cbInstr The instruction length in bytes.
14698 * @param   iEffSeg     The effective segment register (X86_SREG_XXX).
14699 * @param fIoChecked Whether the access to the I/O port has been
14700 * checked or not. It's typically checked in the
14701 * HM scenario.
14702 */
14703VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14704 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14705{
14706 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14707 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14708
14709 /*
14710 * State init.
14711 */
14712 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14713
14714 /*
14715 * Switch orgy for getting to the right handler.
14716 */
14717 VBOXSTRICTRC rcStrict;
14718 if (fRepPrefix)
14719 {
14720 switch (enmAddrMode)
14721 {
14722 case IEMMODE_16BIT:
14723 switch (cbValue)
14724 {
14725 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14726 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14727 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14728 default:
14729 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14730 }
14731 break;
14732
14733 case IEMMODE_32BIT:
14734 switch (cbValue)
14735 {
14736 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14737 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14738 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14739 default:
14740 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14741 }
14742 break;
14743
14744 case IEMMODE_64BIT:
14745 switch (cbValue)
14746 {
14747 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14748 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14749 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14750 default:
14751 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14752 }
14753 break;
14754
14755 default:
14756 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14757 }
14758 }
14759 else
14760 {
14761 switch (enmAddrMode)
14762 {
14763 case IEMMODE_16BIT:
14764 switch (cbValue)
14765 {
14766 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14767 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14768 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14769 default:
14770 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14771 }
14772 break;
14773
14774 case IEMMODE_32BIT:
14775 switch (cbValue)
14776 {
14777 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14778 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14779 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14780 default:
14781 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14782 }
14783 break;
14784
14785 case IEMMODE_64BIT:
14786 switch (cbValue)
14787 {
14788 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14789 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14790 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14791 default:
14792 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14793 }
14794 break;
14795
14796 default:
14797 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14798 }
14799 }
14800
14801 if (pVCpu->iem.s.cActiveMappings)
14802 iemMemRollback(pVCpu);
14803
14804 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14805}
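
/*
 * Usage sketch (illustrative only, not taken from the HM code): an exit handler
 * that has already decoded a REP OUTSB and performed the I/O permission checks
 * might forward it like this; cbInstr and the address mode come from the exit
 * information, and DS is the (non-overridden) source segment:
 *
 *      uint8_t const cbValue    = 1;
 *      bool const    fRepPrefix = true;
 *      bool const    fIoChecked = true;
 *      VBOXSTRICTRC rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, IEMMODE_32BIT,
 *                                                   fRepPrefix, cbInstr,
 *                                                   X86_SREG_DS, fIoChecked);
 *
 * The RSI/RCX/RIP updates happen inside the iemCImpl_rep_outs_* worker, so the
 * caller only needs to act on the returned strict status code.
 */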
14806
14807
14808/**
14809 * Interface for HM and EM for executing string I/O IN (read) instructions.
14810 *
14811 * This API ASSUMES that the caller has already verified that the guest code is
14812 * allowed to access the I/O port. (The I/O port is in the DX register in the
14813 * guest state.)
14814 *
14815 * @returns Strict VBox status code.
14816 * @param pVCpu The cross context virtual CPU structure.
14817 * @param cbValue The size of the I/O port access (1, 2, or 4).
14818 * @param enmAddrMode The addressing mode.
14819 * @param fRepPrefix Indicates whether a repeat prefix is used
14820 * (doesn't matter which for this instruction).
14821 * @param cbInstr The instruction length in bytes.
14822 * @param fIoChecked Whether the access to the I/O port has been
14823 * checked or not. It's typically checked in the
14824 * HM scenario.
14825 */
14826VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14827 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14828{
14829 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14830
14831 /*
14832 * State init.
14833 */
14834 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14835
14836 /*
14837 * Switch orgy for getting to the right handler.
14838 */
14839 VBOXSTRICTRC rcStrict;
14840 if (fRepPrefix)
14841 {
14842 switch (enmAddrMode)
14843 {
14844 case IEMMODE_16BIT:
14845 switch (cbValue)
14846 {
14847 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14848 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14849 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14850 default:
14851 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14852 }
14853 break;
14854
14855 case IEMMODE_32BIT:
14856 switch (cbValue)
14857 {
14858 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14859 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14860 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14861 default:
14862 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14863 }
14864 break;
14865
14866 case IEMMODE_64BIT:
14867 switch (cbValue)
14868 {
14869 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14870 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14871 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14872 default:
14873 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14874 }
14875 break;
14876
14877 default:
14878 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14879 }
14880 }
14881 else
14882 {
14883 switch (enmAddrMode)
14884 {
14885 case IEMMODE_16BIT:
14886 switch (cbValue)
14887 {
14888 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14889 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14890 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14891 default:
14892 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14893 }
14894 break;
14895
14896 case IEMMODE_32BIT:
14897 switch (cbValue)
14898 {
14899 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14900 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14901 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14902 default:
14903 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14904 }
14905 break;
14906
14907 case IEMMODE_64BIT:
14908 switch (cbValue)
14909 {
14910 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14911 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14912 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14913 default:
14914 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14915 }
14916 break;
14917
14918 default:
14919 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14920 }
14921 }
14922
14923 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
14924 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14925}
14926
14927
14928/**
14929 * Interface for raw-mode to execute an OUT instruction.
14930 *
14931 * @returns Strict VBox status code.
14932 * @param pVCpu The cross context virtual CPU structure.
14933 * @param cbInstr The instruction length in bytes.
14934 * @param u16Port The I/O port to write to.
14935 * @param cbReg The register size.
14936 *
14937 * @remarks In ring-0 not all of the state needs to be synced in.
14938 */
14939VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14940{
14941 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14942 Assert(cbReg <= 4 && cbReg != 3);
14943
14944 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14945 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_out, u16Port, cbReg);
14946 Assert(!pVCpu->iem.s.cActiveMappings);
14947 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14948}
14949
14950
14951/**
14952 * Interface for raw-mode to execute an IN instruction.
14953 *
14954 * @returns Strict VBox status code.
14955 * @param pVCpu The cross context virtual CPU structure.
14956 * @param cbInstr The instruction length in bytes.
14957 * @param u16Port The I/O port to read from.
14958 * @param cbReg The register size.
14959 */
14960VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, uint8_t cbReg)
14961{
14962 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14963 Assert(cbReg <= 4 && cbReg != 3);
14964
14965 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14966 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_in, u16Port, cbReg);
14967 Assert(!pVCpu->iem.s.cActiveMappings);
14968 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14969}
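
/*
 * Usage sketch (illustrative only): both helpers take the pre-decoded port and
 * access width, so a hypothetical intercept handler for "out dx, al" and
 * "in al, dx" (fWrite telling the two apart) boils down to:
 *
 *      uint8_t const cbReg = 1;
 *      VBOXSTRICTRC rcStrict = fWrite
 *                            ? IEMExecDecodedOut(pVCpu, cbInstr, u16Port, cbReg)
 *                            : IEMExecDecodedIn( pVCpu, cbInstr, u16Port, cbReg);
 *
 * cbReg must be 1, 2 or 4; the data itself is taken from / stored to AL, AX or
 * EAX by the iemCImpl_out / iemCImpl_in workers.
 */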
14970
14971
14972/**
14973 * Interface for HM and EM to write to a CRx register.
14974 *
14975 * @returns Strict VBox status code.
14976 * @param pVCpu The cross context virtual CPU structure.
14977 * @param cbInstr The instruction length in bytes.
14978 * @param iCrReg The control register number (destination).
14979 * @param iGReg The general purpose register number (source).
14980 *
14981 * @remarks In ring-0 not all of the state needs to be synced in.
14982 */
14983VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
14984{
14985 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
14986 Assert(iCrReg < 16);
14987 Assert(iGReg < 16);
14988
14989 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14990 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
14991 Assert(!pVCpu->iem.s.cActiveMappings);
14992 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14993}
14994
14995
14996/**
14997 * Interface for HM and EM to read from a CRx register.
14998 *
14999 * @returns Strict VBox status code.
15000 * @param pVCpu The cross context virtual CPU structure.
15001 * @param cbInstr The instruction length in bytes.
15002 * @param iGReg The general purpose register number (destination).
15003 * @param iCrReg The control register number (source).
15004 *
15005 * @remarks In ring-0 not all of the state needs to be synced in.
15006 */
15007VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15008{
15009 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15010 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15011 | CPUMCTX_EXTRN_APIC_TPR);
15012 Assert(iCrReg < 16);
15013 Assert(iGReg < 16);
15014
15015 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15016 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15017 Assert(!pVCpu->iem.s.cActiveMappings);
15018 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15019}
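
/*
 * Usage sketch (illustrative only): a hypothetical CR4 write intercept could be
 * forwarded like this, with iGReg taken from the decoded exit information:
 *
 *      uint8_t const iCrReg = 4;
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg);
 *
 * The read variant simply swaps the roles of the two indices:
 *
 *      rcStrict = IEMExecDecodedMovCRxRead(pVCpu, cbInstr, iGReg, iCrReg);
 */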
15020
15021
15022/**
15023 * Interface for HM and EM to clear the CR0[TS] bit.
15024 *
15025 * @returns Strict VBox status code.
15026 * @param pVCpu The cross context virtual CPU structure.
15027 * @param cbInstr The instruction length in bytes.
15028 *
15029 * @remarks In ring-0 not all of the state needs to be synced in.
15030 */
15031VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15032{
15033 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15034
15035 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15036 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15037 Assert(!pVCpu->iem.s.cActiveMappings);
15038 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15039}
15040
15041
15042/**
15043 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15044 *
15045 * @returns Strict VBox status code.
15046 * @param pVCpu The cross context virtual CPU structure.
15047 * @param cbInstr The instruction length in bytes.
15048 * @param uValue The 16-bit value to load into the low bits of CR0 (the machine status word).
15049 *
15050 * @remarks In ring-0 not all of the state needs to be synced in.
15051 */
15052VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue)
15053{
15054 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15055
15056 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15057 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_lmsw, uValue);
15058 Assert(!pVCpu->iem.s.cActiveMappings);
15059 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15060}
15061
15062
15063/**
15064 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15065 *
15066 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15067 *
15068 * @returns Strict VBox status code.
15069 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15070 * @param cbInstr The instruction length in bytes.
15071 * @remarks In ring-0 not all of the state needs to be synced in.
15072 * @thread EMT(pVCpu)
15073 */
15074VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15075{
15076 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15077
15078 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15079 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15080 Assert(!pVCpu->iem.s.cActiveMappings);
15081 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15082}
15083
15084
15085/**
15086 * Interface for HM and EM to emulate the WBINVD instruction.
15087 *
15088 * @returns Strict VBox status code.
15089 * @param pVCpu The cross context virtual CPU structure.
15090 * @param cbInstr The instruction length in bytes.
15091 *
15092 * @remarks In ring-0 not all of the state needs to be synced in.
15093 */
15094VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15095{
15096 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15097
15098 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15099 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15100 Assert(!pVCpu->iem.s.cActiveMappings);
15101 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15102}
15103
15104
15105/**
15106 * Interface for HM and EM to emulate the INVD instruction.
15107 *
15108 * @returns Strict VBox status code.
15109 * @param pVCpu The cross context virtual CPU structure.
15110 * @param cbInstr The instruction length in bytes.
15111 *
15112 * @remarks In ring-0 not all of the state needs to be synced in.
15113 */
15114VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15115{
15116 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15117
15118 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15119 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15120 Assert(!pVCpu->iem.s.cActiveMappings);
15121 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15122}
15123
15124
15125/**
15126 * Interface for HM and EM to emulate the INVLPG instruction.
15127 *
15128 * @returns Strict VBox status code.
15129 * @retval VINF_PGM_SYNC_CR3
15130 *
15131 * @param pVCpu The cross context virtual CPU structure.
15132 * @param cbInstr The instruction length in bytes.
15133 * @param GCPtrPage The effective address of the page to invalidate.
15134 *
15135 * @remarks In ring-0 not all of the state needs to be synced in.
15136 */
15137VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15138{
15139 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15140
15141 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15142 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15143 Assert(!pVCpu->iem.s.cActiveMappings);
15144 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15145}
15146
15147
15148/**
15149 * Interface for HM and EM to emulate the CPUID instruction.
15150 *
15151 * @returns Strict VBox status code.
15152 *
15153 * @param pVCpu The cross context virtual CPU structure.
15154 * @param cbInstr The instruction length in bytes.
15155 *
15156 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
15157 */
15158VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15159{
15160 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15161 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15162
15163 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15164 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15165 Assert(!pVCpu->iem.s.cActiveMappings);
15166 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15167}
15168
15169
15170/**
15171 * Interface for HM and EM to emulate the RDPMC instruction.
15172 *
15173 * @returns Strict VBox status code.
15174 *
15175 * @param pVCpu The cross context virtual CPU structure.
15176 * @param cbInstr The instruction length in bytes.
15177 *
15178 * @remarks Not all of the state needs to be synced in.
15179 */
15180VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15181{
15182 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15183 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15184
15185 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15186 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15187 Assert(!pVCpu->iem.s.cActiveMappings);
15188 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15189}
15190
15191
15192/**
15193 * Interface for HM and EM to emulate the RDTSC instruction.
15194 *
15195 * @returns Strict VBox status code.
15196 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15197 *
15198 * @param pVCpu The cross context virtual CPU structure.
15199 * @param cbInstr The instruction length in bytes.
15200 *
15201 * @remarks Not all of the state needs to be synced in.
15202 */
15203VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15204{
15205 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15206 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15207
15208 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15209 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15210 Assert(!pVCpu->iem.s.cActiveMappings);
15211 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15212}
15213
15214
15215/**
15216 * Interface for HM and EM to emulate the RDTSCP instruction.
15217 *
15218 * @returns Strict VBox status code.
15219 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15220 *
15221 * @param pVCpu The cross context virtual CPU structure.
15222 * @param cbInstr The instruction length in bytes.
15223 *
15224 * @remarks Not all of the state needs to be synced in. Recommended
15225 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15226 */
15227VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15228{
15229 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15230 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15231
15232 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15233 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15234 Assert(!pVCpu->iem.s.cActiveMappings);
15235 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15236}
15237
15238
15239/**
15240 * Interface for HM and EM to emulate the RDMSR instruction.
15241 *
15242 * @returns Strict VBox status code.
15243 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15244 *
15245 * @param pVCpu The cross context virtual CPU structure.
15246 * @param cbInstr The instruction length in bytes.
15247 *
15248 * @remarks Not all of the state needs to be synced in. Requires RCX and
15249 * (currently) all MSRs.
15250 */
15251VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15252{
15253 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15254 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15255
15256 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15257 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15258 Assert(!pVCpu->iem.s.cActiveMappings);
15259 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15260}
15261
15262
15263/**
15264 * Interface for HM and EM to emulate the WRMSR instruction.
15265 *
15266 * @returns Strict VBox status code.
15267 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15268 *
15269 * @param pVCpu The cross context virtual CPU structure.
15270 * @param cbInstr The instruction length in bytes.
15271 *
15272 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15273 * and (currently) all MSRs.
15274 */
15275VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15276{
15277 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15278 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15279 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15280
15281 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15282 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15283 Assert(!pVCpu->iem.s.cActiveMappings);
15284 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15285}
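
/*
 * Usage sketch (illustrative only): since the MSR values live in CPUMCTX, a
 * hypothetical MSR intercept handler only has to make sure the state listed in
 * the remarks (RCX, for WRMSR also RAX/RDX, plus the MSRs) has been imported
 * and then pick the right helper:
 *
 *      VBOXSTRICTRC rcStrict = fWrite
 *                            ? IEMExecDecodedWrmsr(pVCpu, cbInstr)
 *                            : IEMExecDecodedRdmsr(pVCpu, cbInstr);
 *
 * An exception raised during the emulation is reported via VINF_IEM_RAISED_XCPT
 * as noted in the @retval documentation above.
 */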
15286
15287
15288/**
15289 * Interface for HM and EM to emulate the MONITOR instruction.
15290 *
15291 * @returns Strict VBox status code.
15292 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15293 *
15294 * @param pVCpu The cross context virtual CPU structure.
15295 * @param cbInstr The instruction length in bytes.
15296 *
15297 * @remarks Not all of the state needs to be synced in.
15298 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15299 * are used.
15300 */
15301VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15302{
15303 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15304 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15305
15306 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15307 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15308 Assert(!pVCpu->iem.s.cActiveMappings);
15309 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15310}
15311
15312
15313/**
15314 * Interface for HM and EM to emulate the MWAIT instruction.
15315 *
15316 * @returns Strict VBox status code.
15317 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15318 *
15319 * @param pVCpu The cross context virtual CPU structure.
15320 * @param cbInstr The instruction length in bytes.
15321 *
15322 * @remarks Not all of the state needs to be synced in.
15323 */
15324VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15325{
15326 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15327
15328 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15329 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15330 Assert(!pVCpu->iem.s.cActiveMappings);
15331 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15332}
15333
15334
15335/**
15336 * Interface for HM and EM to emulate the HLT instruction.
15337 *
15338 * @returns Strict VBox status code.
15339 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15340 *
15341 * @param pVCpu The cross context virtual CPU structure.
15342 * @param cbInstr The instruction length in bytes.
15343 *
15344 * @remarks Not all of the state needs to be synced in.
15345 */
15346VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15347{
15348 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15349
15350 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15351 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15352 Assert(!pVCpu->iem.s.cActiveMappings);
15353 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15354}
15355
15356
15357/**
15358 * Checks if IEM is in the process of delivering an event (interrupt or
15359 * exception).
15360 *
15361 * @returns true if we're in the process of raising an interrupt or exception,
15362 * false otherwise.
15363 * @param pVCpu The cross context virtual CPU structure.
15364 * @param puVector Where to store the vector associated with the
15365 * currently delivered event, optional.
15366 * @param pfFlags Where to store the event delivery flags (see
15367 * IEM_XCPT_FLAGS_XXX), optional.
15368 * @param puErr Where to store the error code associated with the
15369 * event, optional.
15370 * @param puCr2 Where to store the CR2 associated with the event,
15371 * optional.
15372 * @remarks The caller should check the flags to determine if the error code and
15373 * CR2 are valid for the event.
15374 */
15375VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15376{
15377 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15378 if (fRaisingXcpt)
15379 {
15380 if (puVector)
15381 *puVector = pVCpu->iem.s.uCurXcpt;
15382 if (pfFlags)
15383 *pfFlags = pVCpu->iem.s.fCurXcpt;
15384 if (puErr)
15385 *puErr = pVCpu->iem.s.uCurXcptErr;
15386 if (puCr2)
15387 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15388 }
15389 return fRaisingXcpt;
15390}
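
/*
 * Usage sketch (illustrative only): all four output parameters are optional
 * (NULL is allowed); a caller interested in everything would do:
 *
 *      uint8_t  uVector;
 *      uint32_t fFlags;
 *      uint32_t uErr;
 *      uint64_t uCr2;
 *      if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
 *      {
 *          ... only use uErr and uCr2 when the corresponding IEM_XCPT_FLAGS_XXX
 *              bits are set in fFlags, as per the remarks above ...
 *      }
 */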
15391
15392#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15393
15394/**
15395 * Interface for HM and EM to emulate the CLGI instruction.
15396 *
15397 * @returns Strict VBox status code.
15398 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15399 * @param cbInstr The instruction length in bytes.
15400 * @thread EMT(pVCpu)
15401 */
15402VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15403{
15404 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15405
15406 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15407 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15408 Assert(!pVCpu->iem.s.cActiveMappings);
15409 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15410}
15411
15412
15413/**
15414 * Interface for HM and EM to emulate the STGI instruction.
15415 *
15416 * @returns Strict VBox status code.
15417 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15418 * @param cbInstr The instruction length in bytes.
15419 * @thread EMT(pVCpu)
15420 */
15421VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15422{
15423 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15424
15425 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15426 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15427 Assert(!pVCpu->iem.s.cActiveMappings);
15428 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15429}
15430
15431
15432/**
15433 * Interface for HM and EM to emulate the VMLOAD instruction.
15434 *
15435 * @returns Strict VBox status code.
15436 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15437 * @param cbInstr The instruction length in bytes.
15438 * @thread EMT(pVCpu)
15439 */
15440VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15441{
15442 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15443
15444 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15445 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15446 Assert(!pVCpu->iem.s.cActiveMappings);
15447 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15448}
15449
15450
15451/**
15452 * Interface for HM and EM to emulate the VMSAVE instruction.
15453 *
15454 * @returns Strict VBox status code.
15455 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15456 * @param cbInstr The instruction length in bytes.
15457 * @thread EMT(pVCpu)
15458 */
15459VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15460{
15461 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15462
15463 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15464 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15465 Assert(!pVCpu->iem.s.cActiveMappings);
15466 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15467}
15468
15469
15470/**
15471 * Interface for HM and EM to emulate the INVLPGA instruction.
15472 *
15473 * @returns Strict VBox status code.
15474 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15475 * @param cbInstr The instruction length in bytes.
15476 * @thread EMT(pVCpu)
15477 */
15478VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15479{
15480 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15481
15482 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15483 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15484 Assert(!pVCpu->iem.s.cActiveMappings);
15485 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15486}
15487
15488
15489/**
15490 * Interface for HM and EM to emulate the VMRUN instruction.
15491 *
15492 * @returns Strict VBox status code.
15493 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15494 * @param cbInstr The instruction length in bytes.
15495 * @thread EMT(pVCpu)
15496 */
15497VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15498{
15499 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15500 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15501
15502 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15503 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15504 Assert(!pVCpu->iem.s.cActiveMappings);
15505 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15506}
15507
15508
15509/**
15510 * Interface for HM and EM to emulate \#VMEXIT.
15511 *
15512 * @returns Strict VBox status code.
15513 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15514 * @param uExitCode The exit code.
15515 * @param uExitInfo1 The exit info. 1 field.
15516 * @param uExitInfo2 The exit info. 2 field.
15517 * @thread EMT(pVCpu)
15518 */
15519VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15520{
15521 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15522 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15523 if (pVCpu->iem.s.cActiveMappings)
15524 iemMemRollback(pVCpu);
15525 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15526}
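
/*
 * Usage sketch (illustrative only): when an intercept taken in hardware must be
 * reflected to the nested-guest's VMCB, the already-decoded exit triplet can be
 * handed straight to IEM:
 *
 *      VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
 *
 * Note that unlike the IEMExecDecoded* helpers above, this one does not go
 * through iemInitExec / iemUninitExec; it only fiddles the status code on the
 * way out.
 */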
15527
15528#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15529
15530#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15531
15532/**
15533 * Interface for HM and EM to emulate the VMREAD instruction.
15534 *
15535 * @returns Strict VBox status code.
15536 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15537 * @param pExitInfo Pointer to the VM-exit information struct.
15538 * @thread EMT(pVCpu)
15539 */
15540VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15541{
15542 Assert(pExitInfo);
15543 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15544 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15545
15546 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15547
15548 VBOXSTRICTRC rcStrict;
15549 uint8_t const cbInstr = pExitInfo->cbInstr;
15550 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15551 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15552 {
15553 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15554 {
15555 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15556 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15557 }
15558 else
15559 {
15560 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15561 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15562 }
15563 }
15564 else
15565 {
15566 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15567 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15568 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15569 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15570 }
15571 if (pVCpu->iem.s.cActiveMappings)
15572 iemMemRollback(pVCpu);
15573 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15574}
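
/*
 * Usage sketch (illustrative only, assuming the exit-info type behind
 * PCVMXVEXITINFO is VMXVEXITINFO): the caller packages the decoded VM-exit
 * instruction information and lets the helper pick the register vs. memory
 * path; the field names below are the ones accessed in the code above, while
 * iRegDst and iRegFieldEnc are hypothetical locals holding the decoded GPR
 * indices:
 *
 *      VMXVEXITINFO ExitInfo;
 *      RT_ZERO(ExitInfo);
 *      ExitInfo.cbInstr = cbInstr;
 *      ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand = 1;
 *      ExitInfo.InstrInfo.VmreadVmwrite.iReg1         = iRegDst;
 *      ExitInfo.InstrInfo.VmreadVmwrite.iReg2         = iRegFieldEnc;
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedVmread(pVCpu, &ExitInfo);
 *
 * For a memory operand, fIsRegOperand is cleared and GCPtrEffAddr, iSegReg and
 * u3AddrSize are filled in instead.
 */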
15575
15576
15577/**
15578 * Interface for HM and EM to emulate the VMWRITE instruction.
15579 *
15580 * @returns Strict VBox status code.
15581 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15582 * @param pExitInfo Pointer to the VM-exit information struct.
15583 * @thread EMT(pVCpu)
15584 */
15585VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15586{
15587 Assert(pExitInfo);
15588 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15589 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15590
15591 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15592
15593 uint64_t u64Val;
15594 uint8_t iEffSeg;
15595 IEMMODE enmEffAddrMode;
15596 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15597 {
15598 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15599 iEffSeg = UINT8_MAX;
15600 enmEffAddrMode = UINT8_MAX;
15601 }
15602 else
15603 {
15604 u64Val = pExitInfo->GCPtrEffAddr;
15605 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15606 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15607 }
15608 uint8_t const cbInstr = pExitInfo->cbInstr;
15609 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15610 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
15611 if (pVCpu->iem.s.cActiveMappings)
15612 iemMemRollback(pVCpu);
15613 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15614}
15615
15616
15617/**
15618 * Interface for HM and EM to emulate the VMPTRLD instruction.
15619 *
15620 * @returns Strict VBox status code.
15621 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15622 * @param pExitInfo Pointer to the VM-exit information struct.
15623 * @thread EMT(pVCpu)
15624 */
15625VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15626{
15627 Assert(pExitInfo);
15628 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15629 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15630
15631 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15632
15633 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15634 uint8_t const cbInstr = pExitInfo->cbInstr;
15635 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15636 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15637 if (pVCpu->iem.s.cActiveMappings)
15638 iemMemRollback(pVCpu);
15639 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15640}
15641
15642
15643/**
15644 * Interface for HM and EM to emulate the VMPTRST instruction.
15645 *
15646 * @returns Strict VBox status code.
15647 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15648 * @param pExitInfo Pointer to the VM-exit information struct.
15649 * @thread EMT(pVCpu)
15650 */
15651VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15652{
15653 Assert(pExitInfo);
15654 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15655 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15656
15657 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15658
15659 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15660 uint8_t const cbInstr = pExitInfo->cbInstr;
15661 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15662 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15663 if (pVCpu->iem.s.cActiveMappings)
15664 iemMemRollback(pVCpu);
15665 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15666}
15667
15668
15669/**
15670 * Interface for HM and EM to emulate the VMCLEAR instruction.
15671 *
15672 * @returns Strict VBox status code.
15673 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15674 * @param pExitInfo Pointer to the VM-exit information struct.
15675 * @thread EMT(pVCpu)
15676 */
15677VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15678{
15679 Assert(pExitInfo);
15680 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15681 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15682
15683 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15684
15685 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15686 uint8_t const cbInstr = pExitInfo->cbInstr;
15687 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15688 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15689 if (pVCpu->iem.s.cActiveMappings)
15690 iemMemRollback(pVCpu);
15691 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15692}
15693
15694
15695/**
15696 * Interface for HM and EM to emulate the VMXON instruction.
15697 *
15698 * @returns Strict VBox status code.
15699 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15700 * @param pExitInfo Pointer to the VM-exit information struct.
15701 * @thread EMT(pVCpu)
15702 */
15703VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15704{
15705 Assert(pExitInfo);
15706 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15707 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15708
15709 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15710
15711 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15712 uint8_t const cbInstr = pExitInfo->cbInstr;
15713 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
15714 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
15715 if (pVCpu->iem.s.cActiveMappings)
15716 iemMemRollback(pVCpu);
15717 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15718}
15719
15720
15721/**
15722 * Interface for HM and EM to emulate the VMXOFF instruction.
15723 *
15724 * @returns Strict VBox status code.
15725 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15726 * @param cbInstr The instruction length in bytes.
15727 * @thread EMT(pVCpu)
15728 */
15729VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
15730{
15731 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15732
15733 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15734 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
15735 Assert(!pVCpu->iem.s.cActiveMappings);
15736 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15737}
15738
15739#endif
15740
15741#ifdef IN_RING3
15742
15743/**
15744 * Handles the unlikely and probably fatal merge cases.
15745 *
15746 * @returns Merged status code.
15747 * @param rcStrict Current EM status code.
15748 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15749 * with @a rcStrict.
15750 * @param iMemMap The memory mapping index. For error reporting only.
15751 * @param pVCpu The cross context virtual CPU structure of the calling
15752 * thread, for error reporting only.
15753 */
15754DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15755 unsigned iMemMap, PVMCPU pVCpu)
15756{
15757 if (RT_FAILURE_NP(rcStrict))
15758 return rcStrict;
15759
15760 if (RT_FAILURE_NP(rcStrictCommit))
15761 return rcStrictCommit;
15762
15763 if (rcStrict == rcStrictCommit)
15764 return rcStrictCommit;
15765
15766 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15767 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15768 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15769 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15770 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15771 return VERR_IOM_FF_STATUS_IPE;
15772}
15773
15774
15775/**
15776 * Helper for IOMR3ProcessForceFlag.
15777 *
15778 * @returns Merged status code.
15779 * @param rcStrict Current EM status code.
15780 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15781 * with @a rcStrict.
15782 * @param iMemMap The memory mapping index. For error reporting only.
15783 * @param pVCpu The cross context virtual CPU structure of the calling
15784 * thread, for error reporting only.
15785 */
15786DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15787{
15788 /* Simple. */
15789 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15790 return rcStrictCommit;
15791
15792 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15793 return rcStrict;
15794
15795 /* EM scheduling status codes. */
15796 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15797 && rcStrict <= VINF_EM_LAST))
15798 {
15799 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15800 && rcStrictCommit <= VINF_EM_LAST))
15801 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15802 }
15803
15804 /* Unlikely */
15805 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15806}
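
/*
 * Worked example (illustrative only) of the merge rules above:
 *
 *      iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RAW_TO_R3, ...) returns VINF_EM_RAW_TO_R3
 *      iemR3MergeStatus(VINF_EM_HALT, VINF_SUCCESS, ...)      returns VINF_EM_HALT
 *      two EM scheduling status codes                         keep the numerically smaller one
 *      any failure status                                     lands in iemR3MergeStatusSlow
 */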
15807
15808
15809/**
15810 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15811 *
15812 * @returns Merge between @a rcStrict and what the commit operation returned.
15813 * @param pVM The cross context VM structure.
15814 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15815 * @param rcStrict The status code returned by ring-0 or raw-mode.
15816 */
15817VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15818{
15819 /*
15820 * Reset the pending commit.
15821 */
15822 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15823 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15824 ("%#x %#x %#x\n",
15825 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15826 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15827
15828 /*
15829 * Commit the pending bounce buffers (usually just one).
15830 */
15831 unsigned cBufs = 0;
15832 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15833 while (iMemMap-- > 0)
15834 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15835 {
15836 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15837 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15838 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15839
15840 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15841 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15842 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15843
15844 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15845 {
15846 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15847 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15848 pbBuf,
15849 cbFirst,
15850 PGMACCESSORIGIN_IEM);
15851 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15852 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15853 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15854 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15855 }
15856
15857 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15858 {
15859 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15860 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15861 pbBuf + cbFirst,
15862 cbSecond,
15863 PGMACCESSORIGIN_IEM);
15864 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15865 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15866 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15867 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15868 }
15869 cBufs++;
15870 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15871 }
15872
15873 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15874 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15875 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15876 pVCpu->iem.s.cActiveMappings = 0;
15877 return rcStrict;
15878}
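
/*
 * Usage sketch (illustrative only): the ring-3 EM loop is expected to call this
 * when it finds VMCPU_FF_IEM set after returning from ring-0/raw-mode, merging
 * the commit result into the status it already has:
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */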
15879
15880#endif /* IN_RING3 */
15881