VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@74751

Last change on this file since 74751 was 74751, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 VM-exit bits; Added triple fault intercept. Fixed task switch intercept to include the VM-exit instruction length.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 631.4 KB
1/* $Id: IEMAll.cpp 74751 2018-10-11 05:01:25Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered, however this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
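/*
 * Illustrative sketch (added commentary, not part of the original source):
 * how the levels listed above map onto the VBox logging macros once LOG_GROUP
 * is set to LOG_GROUP_IEM further down.  The variable names are hypothetical.
 */
#if 0 /* example only */
LogFlow(("IEMExecOne: enter\n"));                             /* Flow    - enter/exit state info. */
Log(("IEM: raising #GP(0) at %04x:%08RX64\n", uCs, uRip));    /* Level 1 - major events. */
Log4(("decode - %04x:%08RX64: mov eax, ebx\n", uCs, uRip));   /* Level 4 - mnemonics w/ EIP. */
Log8(("IEM WR %RGv LB %u\n", GCPtrMem, cbMem));               /* Level 8 - memory writes. */
#endif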
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/assert.h>
121#include <iprt/string.h>
122#include <iprt/x86.h>
123
124
125/*********************************************************************************************************************************
126* Structures and Typedefs *
127*********************************************************************************************************************************/
128/** @typedef PFNIEMOP
129 * Pointer to an opcode decoder function.
130 */
131
132/** @def FNIEMOP_DEF
133 * Define an opcode decoder function.
134 *
135 * We're using macros for this so that adding and removing parameters as well as
136 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
137 *
138 * @param a_Name The function name.
139 */
140
141/** @typedef PFNIEMOPRM
142 * Pointer to an opcode decoder function with RM byte.
143 */
144
145/** @def FNIEMOPRM_DEF
146 * Define an opcode decoder function with RM byte.
147 *
148 * We're using macros for this so that adding and removing parameters as well as
149 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
150 *
151 * @param a_Name The function name.
152 */
153
154#if defined(__GNUC__) && defined(RT_ARCH_X86)
155typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
157# define FNIEMOP_DEF(a_Name) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
159# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
161# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
162 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
163
164#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
165typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
167# define FNIEMOP_DEF(a_Name) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
171# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
172 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
173
174#elif defined(__GNUC__)
175typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
176typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
177# define FNIEMOP_DEF(a_Name) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
179# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
181# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
182 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
183
184#else
185typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
186typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
187# define FNIEMOP_DEF(a_Name) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
191# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
192 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
193
194#endif
195#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
196
197
198/**
199 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
200 */
201typedef union IEMSELDESC
202{
203 /** The legacy view. */
204 X86DESC Legacy;
205 /** The long mode view. */
206 X86DESC64 Long;
207} IEMSELDESC;
208/** Pointer to a selector descriptor table entry. */
209typedef IEMSELDESC *PIEMSELDESC;
210
211/**
212 * CPU exception classes.
213 */
214typedef enum IEMXCPTCLASS
215{
216 IEMXCPTCLASS_BENIGN,
217 IEMXCPTCLASS_CONTRIBUTORY,
218 IEMXCPTCLASS_PAGE_FAULT,
219 IEMXCPTCLASS_DOUBLE_FAULT
220} IEMXCPTCLASS;
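/*
 * Added commentary: these classes drive the double-fault decision when a new
 * exception is raised while another one is being delivered.  Roughly: if
 * either event is benign, the second exception is simply delivered; a
 * contributory exception raised while delivering a contributory one escalates
 * to #DF; and a page fault raised while delivering a page fault or a
 * contributory exception likewise escalates to #DF (a further fault during
 * #DF delivery ends in a triple fault).
 */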
221
222
223/*********************************************************************************************************************************
224* Defined Constants And Macros *
225*********************************************************************************************************************************/
226/** @def IEM_WITH_SETJMP
227 * Enables alternative status code handling using setjmps.
228 *
229 * This adds a bit of expense via the setjmp() call since it saves all the
230 * non-volatile registers. However, it eliminates return code checks and allows
231 * for more optimal return value passing (return regs instead of stack buffer).
232 */
233#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
234# define IEM_WITH_SETJMP
235#endif
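/*
 * Illustrative sketch (added; the helper names are hypothetical): what
 * IEM_WITH_SETJMP changes for a typical fetch helper.  Without it, every
 * caller must check and propagate the strict status code; with it, the value
 * is returned directly and failures longjmp back to the instruction
 * dispatcher.
 */
#if 0 /* example only */
/* Status code style: check and propagate at every call site. */
uint8_t bByte;
VBOXSTRICTRC rcStrict = iemExampleFetchU8(pVCpu, &bByte);
if (rcStrict != VINF_SUCCESS)
    return rcStrict;

/* setjmp style: direct return value; errors unwind via longjmp. */
uint8_t bByte2 = iemExampleFetchU8Jmp(pVCpu);
#endif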
236
237/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
238 * due to GCC lacking knowledge about the value range of a switch. */
239#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
240
241/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
242#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
243
244/**
245 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
246 * occasion.
247 */
248#ifdef LOG_ENABLED
249# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
250 do { \
251 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
252 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
253 } while (0)
254#else
255# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
256 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
257#endif
258
259/**
260 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
261 * occasion using the supplied logger statement.
262 *
263 * @param a_LoggerArgs What to log on failure.
264 */
265#ifdef LOG_ENABLED
266# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
267 do { \
268 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
269 /*LogFunc(a_LoggerArgs);*/ \
270 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
271 } while (0)
272#else
273# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
274 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
275#endif
276
277/**
278 * Call an opcode decoder function.
279 *
280 * We're using macros for this so that adding and removing parameters can be
281 * done as we please. See FNIEMOP_DEF.
282 */
283#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
284
285/**
286 * Call a common opcode decoder function taking one extra argument.
287 *
288 * We're using macros for this so that adding and removing parameters can be
289 * done as we please. See FNIEMOP_DEF_1.
290 */
291#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
292
293/**
294 * Call a common opcode decoder function taking two extra arguments.
295 *
296 * We're using macros for this so that adding and removing parameters can be
297 * done as we please. See FNIEMOP_DEF_2.
298 */
299#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
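/*
 * Illustrative sketch (added; the decoder names are hypothetical): how the
 * FNIEMOP_DEF* and FNIEMOP_CALL* macro pairs above are meant to be used, so
 * that the calling convention and parameter lists stay in one place.
 */
#if 0 /* example only */
FNIEMOP_DEF_1(iemOpExample_Grp1, uint8_t, bRm)
{
    RT_NOREF(pVCpu, bRm);
    return VINF_SUCCESS;
}

FNIEMOP_DEF(iemOpExample)
{
    uint8_t bRm; IEM_OPCODE_GET_NEXT_U8(&bRm);
    return FNIEMOP_CALL_1(iemOpExample_Grp1, bRm);
}
#endif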
300
301/**
302 * Check if we're currently executing in real or virtual 8086 mode.
303 *
304 * @returns @c true if it is, @c false if not.
305 * @param a_pVCpu The IEM state of the current CPU.
306 */
307#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
308
309/**
310 * Check if we're currently executing in virtual 8086 mode.
311 *
312 * @returns @c true if it is, @c false if not.
313 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
314 */
315#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
316
317/**
318 * Check if we're currently executing in long mode.
319 *
320 * @returns @c true if it is, @c false if not.
321 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
322 */
323#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
324
325/**
326 * Check if we're currently executing in a 64-bit code segment.
327 *
328 * @returns @c true if it is, @c false if not.
329 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
330 */
331#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
332
333/**
334 * Check if we're currently executing in real mode.
335 *
336 * @returns @c true if it is, @c false if not.
337 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
338 */
339#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
340
341/**
342 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
343 * @returns PCCPUMFEATURES
344 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
345 */
346#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
347
348/**
349 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
350 * @returns PCCPUMFEATURES
351 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
352 */
353#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
354
355/**
356 * Evaluates to true if we're presenting an Intel CPU to the guest.
357 */
358#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
359
360/**
361 * Evaluates to true if we're presenting an AMD CPU to the guest.
362 */
363#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
364
365/**
366 * Check if the address is canonical.
367 */
368#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
369
370/**
371 * Gets the effective VEX.VVVV value.
372 *
373 * The 4th bit is ignored if not 64-bit code.
374 * @returns effective V-register value.
375 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
376 */
377#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
378 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
379
380/** @def IEM_USE_UNALIGNED_DATA_ACCESS
381 * Use unaligned accesses instead of elaborate byte assembly. */
382#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
383# define IEM_USE_UNALIGNED_DATA_ACCESS
384#endif
385
386#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
387
388/**
389 * Check if the guest has entered VMX root operation.
390 */
391# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
392
393/**
394 * Check if the guest has entered VMX non-root operation.
395 */
396# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
397
398/**
399 * Check if the nested-guest has the given Pin-based VM-execution control set.
400 */
401# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
402 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
403
404/**
405 * Check if the nested-guest has the given Processor-based VM-execution control set.
406 */
407# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
408 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
409
410/**
411 * Check if the nested-guest has the given Secondary Processor-based VM-execution
412 * control set.
413 */
414# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
415 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
416
417/**
418 * Invokes the VMX VM-exit handler for an instruction intercept.
419 */
420# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
421 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
422
423/**
424 * Invokes the VMX VM-exit handler for an instruction intercept where the
425 * instruction provides additional VM-exit information.
426 */
427# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
428 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
429
430/**
431 * Invokes the VMX VM-exit handler for a task switch.
432 */
433# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
434 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
435
436/**
437 * Invokes the VMX VM-exit handler for MWAIT.
438 */
439# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
440 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
441
442/**
443 * Invokes the VMX VM-exit handler for triple faults.
444 */
445# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) \
446 do { return iemVmxVmexitTripleFault(a_pVCpu); } while (0)
447
448#else
449# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
450# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
451# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (false)
452# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (false)
453# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (false)
454# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
455# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
456# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
457# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
458# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) do { return VERR_VMX_IPE_1; } while (0)
459
460#endif
461
462#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
463/**
464 * Check if an SVM control/instruction intercept is set.
465 */
466# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
467 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
468
469/**
470 * Check if an SVM read CRx intercept is set.
471 */
472# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
473 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
474
475/**
476 * Check if an SVM write CRx intercept is set.
477 */
478# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
479 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
480
481/**
482 * Check if an SVM read DRx intercept is set.
483 */
484# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
485 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
486
487/**
488 * Check if an SVM write DRx intercept is set.
489 */
490# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
491 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
492
493/**
494 * Check if an SVM exception intercept is set.
495 */
496# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
497 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
498
499/**
500 * Invokes the SVM \#VMEXIT handler for the nested-guest.
501 */
502# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
503 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
504
505/**
506 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
507 * corresponding decode assist information.
508 */
509# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
510 do \
511 { \
512 uint64_t uExitInfo1; \
513 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
514 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
515 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
516 else \
517 uExitInfo1 = 0; \
518 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
519 } while (0)
520
521/** Checks and handles the SVM nested-guest instruction intercept and updates
522 * the NRIP if needed.
523 */
524# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
525 do \
526 { \
527 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
528 { \
529 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
530 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
531 } \
532 } while (0)
533
534/** Checks and handles SVM nested-guest CR0 read intercept. */
535# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
536 do \
537 { \
538 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
539 { /* probably likely */ } \
540 else \
541 { \
542 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
543 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
544 } \
545 } while (0)
546
547/**
548 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
549 */
550# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
551 do { \
552 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
553 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
554 } while (0)
555
556#else
557# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
558# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
559# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
560# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
561# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
562# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
563# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
564# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
565# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
566# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
567# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
568
569#endif
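/*
 * Illustrative sketch (added; the intercept/exit constants are assumed to be
 * the ones from hm_svm.h): a typical instruction-level use of the intercept
 * check macro above, here for a hypothetical RDTSC helper.
 */
#if 0 /* example only */
IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC,
                              0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
#endif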
570
571
572/*********************************************************************************************************************************
573* Global Variables *
574*********************************************************************************************************************************/
575extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
576
577
578/** Function table for the ADD instruction. */
579IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
580{
581 iemAImpl_add_u8, iemAImpl_add_u8_locked,
582 iemAImpl_add_u16, iemAImpl_add_u16_locked,
583 iemAImpl_add_u32, iemAImpl_add_u32_locked,
584 iemAImpl_add_u64, iemAImpl_add_u64_locked
585};
586
587/** Function table for the ADC instruction. */
588IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
589{
590 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
591 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
592 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
593 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
594};
595
596/** Function table for the SUB instruction. */
597IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
598{
599 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
600 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
601 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
602 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
603};
604
605/** Function table for the SBB instruction. */
606IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
607{
608 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
609 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
610 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
611 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
612};
613
614/** Function table for the OR instruction. */
615IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
616{
617 iemAImpl_or_u8, iemAImpl_or_u8_locked,
618 iemAImpl_or_u16, iemAImpl_or_u16_locked,
619 iemAImpl_or_u32, iemAImpl_or_u32_locked,
620 iemAImpl_or_u64, iemAImpl_or_u64_locked
621};
622
623/** Function table for the XOR instruction. */
624IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
625{
626 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
627 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
628 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
629 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
630};
631
632/** Function table for the AND instruction. */
633IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
634{
635 iemAImpl_and_u8, iemAImpl_and_u8_locked,
636 iemAImpl_and_u16, iemAImpl_and_u16_locked,
637 iemAImpl_and_u32, iemAImpl_and_u32_locked,
638 iemAImpl_and_u64, iemAImpl_and_u64_locked
639};
640
641/** Function table for the CMP instruction.
642 * @remarks Making operand order ASSUMPTIONS.
643 */
644IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
645{
646 iemAImpl_cmp_u8, NULL,
647 iemAImpl_cmp_u16, NULL,
648 iemAImpl_cmp_u32, NULL,
649 iemAImpl_cmp_u64, NULL
650};
651
652/** Function table for the TEST instruction.
653 * @remarks Making operand order ASSUMPTIONS.
654 */
655IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
656{
657 iemAImpl_test_u8, NULL,
658 iemAImpl_test_u16, NULL,
659 iemAImpl_test_u32, NULL,
660 iemAImpl_test_u64, NULL
661};
662
663/** Function table for the BT instruction. */
664IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
665{
666 NULL, NULL,
667 iemAImpl_bt_u16, NULL,
668 iemAImpl_bt_u32, NULL,
669 iemAImpl_bt_u64, NULL
670};
671
672/** Function table for the BTC instruction. */
673IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
674{
675 NULL, NULL,
676 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
677 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
678 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
679};
680
681/** Function table for the BTR instruction. */
682IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
683{
684 NULL, NULL,
685 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
686 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
687 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
688};
689
690/** Function table for the BTS instruction. */
691IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
692{
693 NULL, NULL,
694 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
695 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
696 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
697};
698
699/** Function table for the BSF instruction. */
700IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
701{
702 NULL, NULL,
703 iemAImpl_bsf_u16, NULL,
704 iemAImpl_bsf_u32, NULL,
705 iemAImpl_bsf_u64, NULL
706};
707
708/** Function table for the BSR instruction. */
709IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
710{
711 NULL, NULL,
712 iemAImpl_bsr_u16, NULL,
713 iemAImpl_bsr_u32, NULL,
714 iemAImpl_bsr_u64, NULL
715};
716
717/** Function table for the IMUL instruction. */
718IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
719{
720 NULL, NULL,
721 iemAImpl_imul_two_u16, NULL,
722 iemAImpl_imul_two_u32, NULL,
723 iemAImpl_imul_two_u64, NULL
724};
725
726/** Group 1 /r lookup table. */
727IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
728{
729 &g_iemAImpl_add,
730 &g_iemAImpl_or,
731 &g_iemAImpl_adc,
732 &g_iemAImpl_sbb,
733 &g_iemAImpl_and,
734 &g_iemAImpl_sub,
735 &g_iemAImpl_xor,
736 &g_iemAImpl_cmp
737};
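/*
 * Added note: group 1 opcodes (0x80..0x83) encode the actual operation in the
 * ModR/M reg field, so a decoder picks the implementation table roughly like
 * this (bits 5:3 of the ModR/M byte):
 */
#if 0 /* example only */
PCIEMOPBINSIZES pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
#endif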
738
739/** Function table for the INC instruction. */
740IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
741{
742 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
743 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
744 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
745 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
746};
747
748/** Function table for the DEC instruction. */
749IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
750{
751 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
752 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
753 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
754 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
755};
756
757/** Function table for the NEG instruction. */
758IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
759{
760 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
761 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
762 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
763 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
764};
765
766/** Function table for the NOT instruction. */
767IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
768{
769 iemAImpl_not_u8, iemAImpl_not_u8_locked,
770 iemAImpl_not_u16, iemAImpl_not_u16_locked,
771 iemAImpl_not_u32, iemAImpl_not_u32_locked,
772 iemAImpl_not_u64, iemAImpl_not_u64_locked
773};
774
775
776/** Function table for the ROL instruction. */
777IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
778{
779 iemAImpl_rol_u8,
780 iemAImpl_rol_u16,
781 iemAImpl_rol_u32,
782 iemAImpl_rol_u64
783};
784
785/** Function table for the ROR instruction. */
786IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
787{
788 iemAImpl_ror_u8,
789 iemAImpl_ror_u16,
790 iemAImpl_ror_u32,
791 iemAImpl_ror_u64
792};
793
794/** Function table for the RCL instruction. */
795IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
796{
797 iemAImpl_rcl_u8,
798 iemAImpl_rcl_u16,
799 iemAImpl_rcl_u32,
800 iemAImpl_rcl_u64
801};
802
803/** Function table for the RCR instruction. */
804IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
805{
806 iemAImpl_rcr_u8,
807 iemAImpl_rcr_u16,
808 iemAImpl_rcr_u32,
809 iemAImpl_rcr_u64
810};
811
812/** Function table for the SHL instruction. */
813IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
814{
815 iemAImpl_shl_u8,
816 iemAImpl_shl_u16,
817 iemAImpl_shl_u32,
818 iemAImpl_shl_u64
819};
820
821/** Function table for the SHR instruction. */
822IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
823{
824 iemAImpl_shr_u8,
825 iemAImpl_shr_u16,
826 iemAImpl_shr_u32,
827 iemAImpl_shr_u64
828};
829
830/** Function table for the SAR instruction. */
831IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
832{
833 iemAImpl_sar_u8,
834 iemAImpl_sar_u16,
835 iemAImpl_sar_u32,
836 iemAImpl_sar_u64
837};
838
839
840/** Function table for the MUL instruction. */
841IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
842{
843 iemAImpl_mul_u8,
844 iemAImpl_mul_u16,
845 iemAImpl_mul_u32,
846 iemAImpl_mul_u64
847};
848
849/** Function table for the IMUL instruction working implicitly on rAX. */
850IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
851{
852 iemAImpl_imul_u8,
853 iemAImpl_imul_u16,
854 iemAImpl_imul_u32,
855 iemAImpl_imul_u64
856};
857
858/** Function table for the DIV instruction. */
859IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
860{
861 iemAImpl_div_u8,
862 iemAImpl_div_u16,
863 iemAImpl_div_u32,
864 iemAImpl_div_u64
865};
866
867/** Function table for the IDIV instruction. */
868IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
869{
870 iemAImpl_idiv_u8,
871 iemAImpl_idiv_u16,
872 iemAImpl_idiv_u32,
873 iemAImpl_idiv_u64
874};
875
876/** Function table for the SHLD instruction */
877IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
878{
879 iemAImpl_shld_u16,
880 iemAImpl_shld_u32,
881 iemAImpl_shld_u64,
882};
883
884/** Function table for the SHRD instruction */
885IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
886{
887 iemAImpl_shrd_u16,
888 iemAImpl_shrd_u32,
889 iemAImpl_shrd_u64,
890};
891
892
893/** Function table for the PUNPCKLBW instruction */
894IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
895/** Function table for the PUNPCKLWD instruction */
896IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
897/** Function table for the PUNPCKLDQ instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
899/** Function table for the PUNPCKLQDQ instruction */
900IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
901
902/** Function table for the PUNPCKHBW instruction */
903IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
904/** Function table for the PUNPCKHWD instruction */
905IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
906/** Function table for the PUNPCKHDQ instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
908/** Function table for the PUNPCKHQDQ instruction */
909IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
910
911/** Function table for the PXOR instruction */
912IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
913/** Function table for the PCMPEQB instruction */
914IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
915/** Function table for the PCMPEQW instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
917/** Function table for the PCMPEQD instruction */
918IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
919
920
921#if defined(IEM_LOG_MEMORY_WRITES)
922/** What IEM just wrote. */
923uint8_t g_abIemWrote[256];
924/** How much IEM just wrote. */
925size_t g_cbIemWrote;
926#endif
927
928
929/*********************************************************************************************************************************
930* Internal Functions *
931*********************************************************************************************************************************/
932IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
933IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
934IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
935IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
936/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
937IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
938IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
939IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
940IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
941IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
942IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
943IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
944IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
945IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
946IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
947IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
948IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
949#ifdef IEM_WITH_SETJMP
950DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
951DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
952DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
953DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
954DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
955#endif
956
957IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
958IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
959IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
960IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
961IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
962IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
963IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
967IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
968IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
969IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
970IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
971IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
972IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
973IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
974
975#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
976IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
977IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2,
978 uint8_t cbInstr);
979IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu);
980#endif
981
982#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
983IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
984IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr,
985 uint64_t uCr2);
986#endif
987
988
989/**
990 * Sets the pass up status.
991 *
992 * @returns VINF_SUCCESS.
993 * @param pVCpu The cross context virtual CPU structure of the
994 * calling thread.
995 * @param rcPassUp The pass up status. Must be informational.
996 * VINF_SUCCESS is not allowed.
997 */
998IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
999{
1000 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1001
1002 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1003 if (rcOldPassUp == VINF_SUCCESS)
1004 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1005 /* If both are EM scheduling codes, use EM priority rules. */
1006 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1007 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1008 {
1009 if (rcPassUp < rcOldPassUp)
1010 {
1011 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1012 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1013 }
1014 else
1015 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1016 }
1017 /* Override EM scheduling with specific status code. */
1018 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1019 {
1020 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1021 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1022 }
1023 /* Don't override specific status code, first come first served. */
1024 else
1025 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1026 return VINF_SUCCESS;
1027}
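/*
 * Added note: with the rules above, a pending EM scheduling code (e.g.
 * VINF_EM_RAW_TO_R3) is overridden by a later specific code such as
 * VINF_IOM_R3_IOPORT_WRITE, while two EM scheduling codes keep the
 * numerically lower (here: higher priority) one.  Typical call site:
 */
#if 0 /* example only */
if (rcStrict != VINF_SUCCESS && PGM_PHYS_RW_IS_SUCCESS(rcStrict))
    rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
#endif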
1028
1029
1030/**
1031 * Calculates the CPU mode.
1032 *
1033 * This is mainly for updating IEMCPU::enmCpuMode.
1034 *
1035 * @returns CPU mode.
1036 * @param pVCpu The cross context virtual CPU structure of the
1037 * calling thread.
1038 */
1039DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1040{
1041 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1042 return IEMMODE_64BIT;
1043 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1044 return IEMMODE_32BIT;
1045 return IEMMODE_16BIT;
1046}
1047
1048
1049/**
1050 * Initializes the execution state.
1051 *
1052 * @param pVCpu The cross context virtual CPU structure of the
1053 * calling thread.
1054 * @param fBypassHandlers Whether to bypass access handlers.
1055 *
1056 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1057 * side-effects in strict builds.
1058 */
1059DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1060{
1061 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1062 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1063
1064#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1065 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1066 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1067 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1068 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1069 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1070 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1072 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1073#endif
1074
1075#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1076 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1077#endif
1078 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1079 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1080#ifdef VBOX_STRICT
1081 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1082 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1083 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1084 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1085 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1086 pVCpu->iem.s.uRexReg = 127;
1087 pVCpu->iem.s.uRexB = 127;
1088 pVCpu->iem.s.offModRm = 127;
1089 pVCpu->iem.s.uRexIndex = 127;
1090 pVCpu->iem.s.iEffSeg = 127;
1091 pVCpu->iem.s.idxPrefix = 127;
1092 pVCpu->iem.s.uVex3rdReg = 127;
1093 pVCpu->iem.s.uVexLength = 127;
1094 pVCpu->iem.s.fEvexStuff = 127;
1095 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1096# ifdef IEM_WITH_CODE_TLB
1097 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1098 pVCpu->iem.s.pbInstrBuf = NULL;
1099 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1100 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1101 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1102 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1103# else
1104 pVCpu->iem.s.offOpcode = 127;
1105 pVCpu->iem.s.cbOpcode = 127;
1106# endif
1107#endif
1108
1109 pVCpu->iem.s.cActiveMappings = 0;
1110 pVCpu->iem.s.iNextMapping = 0;
1111 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1112 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1113#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1114 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1115 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1116 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1117 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1118 if (!pVCpu->iem.s.fInPatchCode)
1119 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1120#endif
1121}
1122
1123#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1124/**
1125 * Performs a minimal reinitialization of the execution state.
1126 *
1127 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1128 * 'world-switch' type operations on the CPU. Currently only nested
1129 * hardware-virtualization uses it.
1130 *
1131 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1132 */
1133IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1134{
1135 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1136 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1137
1138 pVCpu->iem.s.uCpl = uCpl;
1139 pVCpu->iem.s.enmCpuMode = enmMode;
1140 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1141 pVCpu->iem.s.enmEffAddrMode = enmMode;
1142 if (enmMode != IEMMODE_64BIT)
1143 {
1144 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1145 pVCpu->iem.s.enmEffOpSize = enmMode;
1146 }
1147 else
1148 {
1149 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1150 pVCpu->iem.s.enmEffOpSize = enmMode;
1151 }
1152 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1153#ifndef IEM_WITH_CODE_TLB
1154 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1155 pVCpu->iem.s.offOpcode = 0;
1156 pVCpu->iem.s.cbOpcode = 0;
1157#endif
1158 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1159}
1160#endif
1161
1162/**
1163 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1164 *
1165 * @param pVCpu The cross context virtual CPU structure of the
1166 * calling thread.
1167 */
1168DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1169{
1170 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1171#ifdef VBOX_STRICT
1172# ifdef IEM_WITH_CODE_TLB
1173 NOREF(pVCpu);
1174# else
1175 pVCpu->iem.s.cbOpcode = 0;
1176# endif
1177#else
1178 NOREF(pVCpu);
1179#endif
1180}
1181
1182
1183/**
1184 * Initializes the decoder state.
1185 *
1186 * iemReInitDecoder is mostly a copy of this function.
1187 *
1188 * @param pVCpu The cross context virtual CPU structure of the
1189 * calling thread.
1190 * @param fBypassHandlers Whether to bypass access handlers.
1191 */
1192DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1193{
1194 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1195 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1196
1197#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1198 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1199 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1200 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1201 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1206#endif
1207
1208#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1209 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1210#endif
1211 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1212 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1213 pVCpu->iem.s.enmCpuMode = enmMode;
1214 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1215 pVCpu->iem.s.enmEffAddrMode = enmMode;
1216 if (enmMode != IEMMODE_64BIT)
1217 {
1218 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1219 pVCpu->iem.s.enmEffOpSize = enmMode;
1220 }
1221 else
1222 {
1223 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1224 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1225 }
1226 pVCpu->iem.s.fPrefixes = 0;
1227 pVCpu->iem.s.uRexReg = 0;
1228 pVCpu->iem.s.uRexB = 0;
1229 pVCpu->iem.s.uRexIndex = 0;
1230 pVCpu->iem.s.idxPrefix = 0;
1231 pVCpu->iem.s.uVex3rdReg = 0;
1232 pVCpu->iem.s.uVexLength = 0;
1233 pVCpu->iem.s.fEvexStuff = 0;
1234 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1235#ifdef IEM_WITH_CODE_TLB
1236 pVCpu->iem.s.pbInstrBuf = NULL;
1237 pVCpu->iem.s.offInstrNextByte = 0;
1238 pVCpu->iem.s.offCurInstrStart = 0;
1239# ifdef VBOX_STRICT
1240 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1241 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1242 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1243# endif
1244#else
1245 pVCpu->iem.s.offOpcode = 0;
1246 pVCpu->iem.s.cbOpcode = 0;
1247#endif
1248 pVCpu->iem.s.offModRm = 0;
1249 pVCpu->iem.s.cActiveMappings = 0;
1250 pVCpu->iem.s.iNextMapping = 0;
1251 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1252 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1253#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1254 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1255 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1256 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1257 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1258 if (!pVCpu->iem.s.fInPatchCode)
1259 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1260#endif
1261
1262#ifdef DBGFTRACE_ENABLED
1263 switch (enmMode)
1264 {
1265 case IEMMODE_64BIT:
1266 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1267 break;
1268 case IEMMODE_32BIT:
1269 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1270 break;
1271 case IEMMODE_16BIT:
1272 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1273 break;
1274 }
1275#endif
1276}
1277
1278
1279/**
1280 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1281 *
1282 * This is mostly a copy of iemInitDecoder.
1283 *
1284 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1285 */
1286DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1287{
1288 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1289
1290#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1291 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1292 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1293 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1294 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1295 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1296 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1297 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1298 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1299#endif
1300
1301 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1302 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1303 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1304 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1305 pVCpu->iem.s.enmEffAddrMode = enmMode;
1306 if (enmMode != IEMMODE_64BIT)
1307 {
1308 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1309 pVCpu->iem.s.enmEffOpSize = enmMode;
1310 }
1311 else
1312 {
1313 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1314 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1315 }
1316 pVCpu->iem.s.fPrefixes = 0;
1317 pVCpu->iem.s.uRexReg = 0;
1318 pVCpu->iem.s.uRexB = 0;
1319 pVCpu->iem.s.uRexIndex = 0;
1320 pVCpu->iem.s.idxPrefix = 0;
1321 pVCpu->iem.s.uVex3rdReg = 0;
1322 pVCpu->iem.s.uVexLength = 0;
1323 pVCpu->iem.s.fEvexStuff = 0;
1324 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1325#ifdef IEM_WITH_CODE_TLB
1326 if (pVCpu->iem.s.pbInstrBuf)
1327 {
1328 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1329 - pVCpu->iem.s.uInstrBufPc;
1330 if (off < pVCpu->iem.s.cbInstrBufTotal)
1331 {
1332 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1333 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1334 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1335 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1336 else
1337 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1338 }
1339 else
1340 {
1341 pVCpu->iem.s.pbInstrBuf = NULL;
1342 pVCpu->iem.s.offInstrNextByte = 0;
1343 pVCpu->iem.s.offCurInstrStart = 0;
1344 pVCpu->iem.s.cbInstrBuf = 0;
1345 pVCpu->iem.s.cbInstrBufTotal = 0;
1346 }
1347 }
1348 else
1349 {
1350 pVCpu->iem.s.offInstrNextByte = 0;
1351 pVCpu->iem.s.offCurInstrStart = 0;
1352 pVCpu->iem.s.cbInstrBuf = 0;
1353 pVCpu->iem.s.cbInstrBufTotal = 0;
1354 }
1355#else
1356 pVCpu->iem.s.cbOpcode = 0;
1357 pVCpu->iem.s.offOpcode = 0;
1358#endif
1359 pVCpu->iem.s.offModRm = 0;
1360 Assert(pVCpu->iem.s.cActiveMappings == 0);
1361 pVCpu->iem.s.iNextMapping = 0;
1362 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1363 Assert(pVCpu->iem.s.fBypassHandlers == false);
1364#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1365 if (!pVCpu->iem.s.fInPatchCode)
1366 { /* likely */ }
1367 else
1368 {
1369 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1370 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1371 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1372 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1373 if (!pVCpu->iem.s.fInPatchCode)
1374 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1375 }
1376#endif
1377
1378#ifdef DBGFTRACE_ENABLED
1379 switch (enmMode)
1380 {
1381 case IEMMODE_64BIT:
1382 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1383 break;
1384 case IEMMODE_32BIT:
1385 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1386 break;
1387 case IEMMODE_16BIT:
1388 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1389 break;
1390 }
1391#endif
1392}
1393
1394
1395
1396/**
1397 * Prefetches opcodes the first time we start executing.
1398 *
1399 * @returns Strict VBox status code.
1400 * @param pVCpu The cross context virtual CPU structure of the
1401 * calling thread.
1402 * @param fBypassHandlers Whether to bypass access handlers.
1403 */
1404IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1405{
1406 iemInitDecoder(pVCpu, fBypassHandlers);
1407
1408#ifdef IEM_WITH_CODE_TLB
1409 /** @todo Do ITLB lookup here. */
1410
1411#else /* !IEM_WITH_CODE_TLB */
1412
1413 /*
1414 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1415 *
1416 * First translate CS:rIP to a physical address.
1417 */
1418 uint32_t cbToTryRead;
1419 RTGCPTR GCPtrPC;
1420 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1421 {
1422 cbToTryRead = PAGE_SIZE;
1423 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1424 if (IEM_IS_CANONICAL(GCPtrPC))
1425 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1426 else
1427 return iemRaiseGeneralProtectionFault0(pVCpu);
1428 }
1429 else
1430 {
1431 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1432 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1433 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1434 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1435 else
1436 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1437 if (cbToTryRead) { /* likely */ }
1438 else /* overflowed */
1439 {
1440 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1441 cbToTryRead = UINT32_MAX;
1442 }
1443 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1444 Assert(GCPtrPC <= UINT32_MAX);
1445 }
1446
1447# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1448 /* Allow interpretation of patch manager code blocks since they can for
1449 instance throw #PFs for perfectly good reasons. */
1450 if (pVCpu->iem.s.fInPatchCode)
1451 {
1452 size_t cbRead = 0;
1453 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1454 AssertRCReturn(rc, rc);
1455 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1456 return VINF_SUCCESS;
1457 }
1458# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1459
1460 RTGCPHYS GCPhys;
1461 uint64_t fFlags;
1462 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1463 if (RT_SUCCESS(rc)) { /* probable */ }
1464 else
1465 {
1466 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1467 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1468 }
1469 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1470 else
1471 {
1472 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1473 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1474 }
1475 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1476 else
1477 {
1478 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1479 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1480 }
1481 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1482 /** @todo Check reserved bits and such stuff. PGM is better at doing
1483 * that, so do it when implementing the guest virtual address
1484 * TLB... */
1485
1486 /*
1487 * Read the bytes at this address.
1488 */
1489 PVM pVM = pVCpu->CTX_SUFF(pVM);
1490# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1491 size_t cbActual;
1492 if ( PATMIsEnabled(pVM)
1493 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1494 {
1495 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1496 Assert(cbActual > 0);
1497 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1498 }
1499 else
1500# endif
1501 {
1502 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1503 if (cbToTryRead > cbLeftOnPage)
1504 cbToTryRead = cbLeftOnPage;
1505 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1506 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1507
1508 if (!pVCpu->iem.s.fBypassHandlers)
1509 {
1510 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1511 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1512 { /* likely */ }
1513 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1514 {
1515 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1516 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1517 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1518 }
1519 else
1520 {
1521 Log((RT_SUCCESS(rcStrict)
1522 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1523 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1524 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1525 return rcStrict;
1526 }
1527 }
1528 else
1529 {
1530 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1531 if (RT_SUCCESS(rc))
1532 { /* likely */ }
1533 else
1534 {
1535 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1536 GCPtrPC, GCPhys, cbToTryRead, rc));
1537 return rc;
1538 }
1539 }
1540 pVCpu->iem.s.cbOpcode = cbToTryRead;
1541 }
1542#endif /* !IEM_WITH_CODE_TLB */
1543 return VINF_SUCCESS;
1544}
1545
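/*
 * Illustrative sketch (iemCalcInitialPrefetchSize is a hypothetical helper,
 * not part of the IEM API): the prefetch above never lets a single read cross
 * a guest page boundary nor overflow the opcode buffer.  The clamping
 * arithmetic boils down to:
 *
 *      static uint32_t iemCalcInitialPrefetchSize(RTGCPTR GCPtrPC, uint32_t cbOpcodeBuf)
 *      {
 *          uint32_t cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK); // bytes left on this page
 *          if (cbToTryRead > cbOpcodeBuf)
 *              cbToTryRead = cbOpcodeBuf;                                   // don't overflow abOpcode
 *          return cbToTryRead;
 *      }
 *
 * The real code additionally clamps against the CS limit outside 64-bit mode.
 */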
1546
1547/**
1548 * Invalidates the IEM TLBs.
1549 *
1550 * This is called internally as well as by PGM when moving GC mappings.
1551 *
1553 * @param pVCpu The cross context virtual CPU structure of the calling
1554 * thread.
1555 * @param fVmm Set when PGM calls us with a remapping.
1556 */
1557VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1558{
1559#ifdef IEM_WITH_CODE_TLB
1560 pVCpu->iem.s.cbInstrBufTotal = 0;
1561 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1562 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1563 { /* very likely */ }
1564 else
1565 {
1566 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1567 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1568 while (i-- > 0)
1569 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1570 }
1571#endif
1572
1573#ifdef IEM_WITH_DATA_TLB
1574 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1575 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1576 { /* very likely */ }
1577 else
1578 {
1579 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1580 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1581 while (i-- > 0)
1582 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1583 }
1584#endif
1585 NOREF(pVCpu); NOREF(fVmm);
1586}
1587
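/*
 * Illustrative note on the revision trick above (local names are stand-ins):
 * TLB tags combine the virtual page number with the current revision, see the
 * lookup in the code TLB fetch path further down, so bumping uTlbRevision
 * makes every cached tag stale without touching the 256 entries:
 *
 *      uint64_t const uTagOld = (GCPtr >> X86_PAGE_SHIFT) | uTlbRevisionAtFill;
 *      uint64_t const uTagNew = (GCPtr >> X86_PAGE_SHIFT) | (uTlbRevisionAtFill + IEMTLB_REVISION_INCR);
 *      // uTagOld != uTagNew, so the next lookup misses and refills the entry.
 *
 * Only when the revision counter wraps to zero are the uTag fields scrubbed
 * explicitly, as done in the else branches above.
 */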
1588
1589/**
1590 * Invalidates a page in the TLBs.
1591 *
1592 * @param pVCpu The cross context virtual CPU structure of the calling
1593 * thread.
1594 * @param GCPtr The address of the page to invalidate.
1595 */
1596VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1597{
1598#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1599 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1600 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1601 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1602 uintptr_t idx = (uint8_t)GCPtr;
1603
1604# ifdef IEM_WITH_CODE_TLB
1605 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1606 {
1607 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1608 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1609 pVCpu->iem.s.cbInstrBufTotal = 0;
1610 }
1611# endif
1612
1613# ifdef IEM_WITH_DATA_TLB
1614 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1615 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1616# endif
1617#else
1618 NOREF(pVCpu); NOREF(GCPtr);
1619#endif
1620}
1621
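/*
 * Illustrative note: both TLBs are direct mapped with 256 entries, so the
 * entry index is simply the low byte of the virtual page number, which is
 * what the (uint8_t)GCPtr cast above computes after the page shift
 * (GCPtrPage stands for the unshifted guest address here):
 *
 *      uintptr_t const idx   = (uint8_t)(GCPtrPage >> X86_PAGE_SHIFT);   // 0..255
 *      PIEMTLBENTRY    pTlbe = &pVCpu->iem.s.DataTlb.aEntries[idx];
 */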
1622
1623/**
1624 * Invalidates the host physical aspects of the IEM TLBs.
1625 *
1626 * This is called internally as well as by PGM when moving GC mappings.
1627 *
1628 * @param pVCpu The cross context virtual CPU structure of the calling
1629 * thread.
1630 */
1631VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1632{
1633#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1634 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1635
1636# ifdef IEM_WITH_CODE_TLB
1637 pVCpu->iem.s.cbInstrBufTotal = 0;
1638# endif
1639 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1640 if (uTlbPhysRev != 0)
1641 {
1642 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1643 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1644 }
1645 else
1646 {
1647 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1648 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1649
1650 unsigned i;
1651# ifdef IEM_WITH_CODE_TLB
1652 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1653 while (i-- > 0)
1654 {
1655 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1656 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1657 }
1658# endif
1659# ifdef IEM_WITH_DATA_TLB
1660 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1661 while (i-- > 0)
1662 {
1663 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1664 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1665 }
1666# endif
1667 }
1668#else
1669 NOREF(pVCpu);
1670#endif
1671}
1672
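/*
 * Illustrative note: fFlagsAndPhysRev packs the per-entry flag bits together
 * with the physical revision, so a single masked compare tells whether the
 * cached host mapping is still current (see the code TLB fetch path below):
 *
 *      if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
 *      {   // pbMappingR3 and the PG_NO_* bits can be used as-is.
 *      }
 *      else
 *      {   // Stale: re-query PGM (PGMPhysIemGCPhys2PtrNoLock) and refresh the entry.
 *      }
 */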
1673
1674/**
1675 * Invalidates the host physical aspects of the IEM TLBs.
1676 *
1677 * This is called internally as well as by PGM when moving GC mappings.
1678 *
1679 * @param pVM The cross context VM structure.
1680 *
1681 * @remarks Caller holds the PGM lock.
1682 */
1683VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1684{
1685 RT_NOREF_PV(pVM);
1686}
1687
1688#ifdef IEM_WITH_CODE_TLB
1689
1690/**
1691 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception
1692 * and longjmp'ing on failure.
1693 *
1694 * We end up here for a number of reasons:
1695 * - pbInstrBuf isn't yet initialized.
1696 * - Advancing beyond the buffer boundary (e.g. cross page).
1697 * - Advancing beyond the CS segment limit.
1698 * - Fetching from non-mappable page (e.g. MMIO).
1699 *
1700 * @param pVCpu The cross context virtual CPU structure of the
1701 * calling thread.
1702 * @param pvDst Where to return the bytes.
1703 * @param cbDst Number of bytes to read.
1704 *
1705 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1706 */
1707IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1708{
1709#ifdef IN_RING3
1710 for (;;)
1711 {
1712 Assert(cbDst <= 8);
1713 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1714
1715 /*
1716 * We might have a partial buffer match, deal with that first to make the
1717 * rest simpler. This is the first part of the cross page/buffer case.
1718 */
1719 if (pVCpu->iem.s.pbInstrBuf != NULL)
1720 {
1721 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1722 {
1723 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1724 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1725 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1726
1727 cbDst -= cbCopy;
1728 pvDst = (uint8_t *)pvDst + cbCopy;
1729 offBuf += cbCopy;
1730 pVCpu->iem.s.offInstrNextByte = offBuf; /* offBuf already includes the bytes copied above */
1731 }
1732 }
1733
1734 /*
1735 * Check segment limit, figuring how much we're allowed to access at this point.
1736 *
1737 * We will fault immediately if RIP is past the segment limit / in non-canonical
1738 * territory. If we do continue, there are one or more bytes to read before we
1739 * end up in trouble and we need to do that first before faulting.
1740 */
1741 RTGCPTR GCPtrFirst;
1742 uint32_t cbMaxRead;
1743 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1744 {
1745 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1746 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1747 { /* likely */ }
1748 else
1749 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1750 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1751 }
1752 else
1753 {
1754 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1755 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1756 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1757 { /* likely */ }
1758 else
1759 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1760 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1761 if (cbMaxRead != 0)
1762 { /* likely */ }
1763 else
1764 {
1765 /* Overflowed because address is 0 and limit is max. */
1766 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1767 cbMaxRead = X86_PAGE_SIZE;
1768 }
1769 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1770 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1771 if (cbMaxRead2 < cbMaxRead)
1772 cbMaxRead = cbMaxRead2;
1773 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1774 }
1775
1776 /*
1777 * Get the TLB entry for this piece of code.
1778 */
1779 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1780 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1781 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1782 if (pTlbe->uTag == uTag)
1783 {
1784 /* likely when executing lots of code, otherwise unlikely */
1785# ifdef VBOX_WITH_STATISTICS
1786 pVCpu->iem.s.CodeTlb.cTlbHits++;
1787# endif
1788 }
1789 else
1790 {
1791 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1792# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1793 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1794 {
1795 pTlbe->uTag = uTag;
1796 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1797 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1798 pTlbe->GCPhys = NIL_RTGCPHYS;
1799 pTlbe->pbMappingR3 = NULL;
1800 }
1801 else
1802# endif
1803 {
1804 RTGCPHYS GCPhys;
1805 uint64_t fFlags;
1806 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1807 if (RT_FAILURE(rc))
1808 {
1809 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1810 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1811 }
1812
1813 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1814 pTlbe->uTag = uTag;
1815 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1816 pTlbe->GCPhys = GCPhys;
1817 pTlbe->pbMappingR3 = NULL;
1818 }
1819 }
1820
1821 /*
1822 * Check TLB page table level access flags.
1823 */
1824 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1825 {
1826 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1827 {
1828 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1829 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1830 }
1831 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1832 {
1833 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1834 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1835 }
1836 }
1837
1838# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1839 /*
1840 * Allow interpretation of patch manager code blocks since they can for
1841 * instance throw #PFs for perfectly good reasons.
1842 */
1843 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1844 { /* likely */ }
1845 else
1846 {
1847 /** @todo This could be optimized a little in ring-3 if we liked. */
1848 size_t cbRead = 0;
1849 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1850 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1851 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1852 return;
1853 }
1854# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1855
1856 /*
1857 * Look up the physical page info if necessary.
1858 */
1859 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1860 { /* not necessary */ }
1861 else
1862 {
1863 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1864 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1865 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1866 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1867 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1868 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1869 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1870 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1871 }
1872
1873# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1874 /*
1875 * Try do a direct read using the pbMappingR3 pointer.
1876 */
1877 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1878 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1879 {
1880 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1881 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1882 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1883 {
1884 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1885 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1886 }
1887 else
1888 {
1889 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1890 Assert(cbInstr < cbMaxRead);
1891 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1892 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1893 }
1894 if (cbDst <= cbMaxRead)
1895 {
1896 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1897 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1898 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1899 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1900 return;
1901 }
1902 pVCpu->iem.s.pbInstrBuf = NULL;
1903
1904 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1905 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1906 }
1907 else
1908# endif
1909#if 0
1910 /*
1911 * If there is no special read handling, we can read a bit more and
1912 * put it in the prefetch buffer.
1913 */
1914 if ( cbDst < cbMaxRead
1915 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1916 {
1917 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1918 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1919 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1920 { /* likely */ }
1921 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1922 {
1923 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1924 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1925 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1926 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1927 }
1928 else
1929 {
1930 Log((RT_SUCCESS(rcStrict)
1931 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1932 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1933 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1934 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1935 }
1936 }
1937 /*
1938 * Special read handling, so only read exactly what's needed.
1939 * This is a highly unlikely scenario.
1940 */
1941 else
1942#endif
1943 {
1944 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1945 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1946 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1947 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1948 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1949 { /* likely */ }
1950 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1951 {
1952 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1953 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1954 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1955 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1956 }
1957 else
1958 {
1959 Log((RT_SUCCESS(rcStrict)
1960 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1961 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1962 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1963 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1964 }
1965 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1966 if (cbToRead == cbDst)
1967 return;
1968 }
1969
1970 /*
1971 * More to read, loop.
1972 */
1973 cbDst -= cbMaxRead;
1974 pvDst = (uint8_t *)pvDst + cbMaxRead;
1975 }
1976#else
1977 RT_NOREF(pvDst, cbDst);
1978 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1979#endif
1980}
1981
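/*
 * Illustrative sketch of the bounds computed by iemOpcodeFetchBytesJmp above
 * for non-64-bit modes (GCPtrOffset and GCPtrLinear are stand-in names): the
 * fetch size is limited first by the CS limit and then by the distance to the
 * end of the current page, so a single read never crosses either boundary:
 *
 *      uint32_t cbMaxRead   = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrOffset + 1; // bytes below the limit
 *      RTGCPTR  GCPtrLinear = (uint32_t)GCPtrOffset + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
 *      uint32_t cbOnPage    = X86_PAGE_SIZE - ((uint32_t)GCPtrLinear & X86_PAGE_OFFSET_MASK);
 *      if (cbOnPage < cbMaxRead)
 *          cbMaxRead = cbOnPage;
 */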
1982#else
1983
1984/**
1985 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1986 * exception if it fails.
1987 *
1988 * @returns Strict VBox status code.
1989 * @param pVCpu The cross context virtual CPU structure of the
1990 * calling thread.
1991 * @param cbMin The minimum number of bytes relative to offOpcode
1992 * that must be read.
1993 */
1994IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
1995{
1996 /*
1997 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1998 *
1999 * First translate CS:rIP to a physical address.
2000 */
2001 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2002 uint32_t cbToTryRead;
2003 RTGCPTR GCPtrNext;
2004 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2005 {
2006 cbToTryRead = PAGE_SIZE;
2007 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2008 if (!IEM_IS_CANONICAL(GCPtrNext))
2009 return iemRaiseGeneralProtectionFault0(pVCpu);
2010 }
2011 else
2012 {
2013 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2014 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2015 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2016 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2017 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2018 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2019 if (!cbToTryRead) /* overflowed */
2020 {
2021 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2022 cbToTryRead = UINT32_MAX;
2023 /** @todo check out wrapping around the code segment. */
2024 }
2025 if (cbToTryRead < cbMin - cbLeft)
2026 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2027 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2028 }
2029
2030 /* Only read up to the end of the page, and make sure we don't read more
2031 than the opcode buffer can hold. */
2032 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2033 if (cbToTryRead > cbLeftOnPage)
2034 cbToTryRead = cbLeftOnPage;
2035 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2036 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2037/** @todo r=bird: Convert assertion into undefined opcode exception? */
2038 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2039
2040# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2041 /* Allow interpretation of patch manager code blocks since they can for
2042 instance throw #PFs for perfectly good reasons. */
2043 if (pVCpu->iem.s.fInPatchCode)
2044 {
2045 size_t cbRead = 0;
2046 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2047 AssertRCReturn(rc, rc);
2048 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2049 return VINF_SUCCESS;
2050 }
2051# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2052
2053 RTGCPHYS GCPhys;
2054 uint64_t fFlags;
2055 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2056 if (RT_FAILURE(rc))
2057 {
2058 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2059 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2060 }
2061 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2062 {
2063 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2064 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2065 }
2066 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2067 {
2068 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2069 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2070 }
2071 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2072 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2073 /** @todo Check reserved bits and such stuff. PGM is better at doing
2074 * that, so do it when implementing the guest virtual address
2075 * TLB... */
2076
2077 /*
2078 * Read the bytes at this address.
2079 *
2080 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2081 * and since PATM should only patch the start of an instruction there
2082 * should be no need to check again here.
2083 */
2084 if (!pVCpu->iem.s.fBypassHandlers)
2085 {
2086 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2087 cbToTryRead, PGMACCESSORIGIN_IEM);
2088 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2089 { /* likely */ }
2090 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2091 {
2092 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2093 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2094 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2095 }
2096 else
2097 {
2098 Log((RT_SUCCESS(rcStrict)
2099 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2100 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2101 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2102 return rcStrict;
2103 }
2104 }
2105 else
2106 {
2107 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2108 if (RT_SUCCESS(rc))
2109 { /* likely */ }
2110 else
2111 {
2112 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2113 return rc;
2114 }
2115 }
2116 pVCpu->iem.s.cbOpcode += cbToTryRead;
2117 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2118
2119 return VINF_SUCCESS;
2120}
2121
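/*
 * Illustrative note on the non-TLB opcode buffer bookkeeping: abOpcode holds
 * the bytes prefetched so far, cbOpcode says how many of them are valid, and
 * offOpcode how many the decoder has consumed.  A byte fetch thus looks
 * roughly like this (see iemOpcodeGetNextU8 below; the slow helper tops the
 * buffer up via iemOpcodeFetchMoreBytes and then reads):
 *
 *      if (pVCpu->iem.s.offOpcode < pVCpu->iem.s.cbOpcode)
 *          bOpcode = pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];   // fast path
 *      else
 *          rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &bOpcode);          // refill + read
 */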
2122#endif /* !IEM_WITH_CODE_TLB */
2123#ifndef IEM_WITH_SETJMP
2124
2125/**
2126 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2127 *
2128 * @returns Strict VBox status code.
2129 * @param pVCpu The cross context virtual CPU structure of the
2130 * calling thread.
2131 * @param pb Where to return the opcode byte.
2132 */
2133DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2134{
2135 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2136 if (rcStrict == VINF_SUCCESS)
2137 {
2138 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2139 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2140 pVCpu->iem.s.offOpcode = offOpcode + 1;
2141 }
2142 else
2143 *pb = 0;
2144 return rcStrict;
2145}
2146
2147
2148/**
2149 * Fetches the next opcode byte.
2150 *
2151 * @returns Strict VBox status code.
2152 * @param pVCpu The cross context virtual CPU structure of the
2153 * calling thread.
2154 * @param pu8 Where to return the opcode byte.
2155 */
2156DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2157{
2158 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2159 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2160 {
2161 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2162 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2163 return VINF_SUCCESS;
2164 }
2165 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2166}
2167
2168#else /* IEM_WITH_SETJMP */
2169
2170/**
2171 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2172 *
2173 * @returns The opcode byte.
2174 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2175 */
2176DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2177{
2178# ifdef IEM_WITH_CODE_TLB
2179 uint8_t u8;
2180 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2181 return u8;
2182# else
2183 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2184 if (rcStrict == VINF_SUCCESS)
2185 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2186 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2187# endif
2188}
2189
2190
2191/**
2192 * Fetches the next opcode byte, longjmp on error.
2193 *
2194 * @returns The opcode byte.
2195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2196 */
2197DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2198{
2199# ifdef IEM_WITH_CODE_TLB
2200 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2201 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2202 if (RT_LIKELY( pbBuf != NULL
2203 && offBuf < pVCpu->iem.s.cbInstrBuf))
2204 {
2205 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2206 return pbBuf[offBuf];
2207 }
2208# else
2209 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2210 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2211 {
2212 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2213 return pVCpu->iem.s.abOpcode[offOpcode];
2214 }
2215# endif
2216 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2217}
2218
2219#endif /* IEM_WITH_SETJMP */
2220
2221/**
2222 * Fetches the next opcode byte, returns automatically on failure.
2223 *
2224 * @param a_pu8 Where to return the opcode byte.
2225 * @remark Implicitly references pVCpu.
2226 */
2227#ifndef IEM_WITH_SETJMP
2228# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2229 do \
2230 { \
2231 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2232 if (rcStrict2 == VINF_SUCCESS) \
2233 { /* likely */ } \
2234 else \
2235 return rcStrict2; \
2236 } while (0)
2237#else
2238# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2239#endif /* IEM_WITH_SETJMP */
2240
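/*
 * Illustrative usage sketch (iemOp_ExampleImm8 is a hypothetical function, not
 * part of this file): decoder code uses the macro so the same source works in
 * both build modes - without IEM_WITH_SETJMP the macro returns the strict
 * status to the caller, with it the fetch longjmps on failure:
 *
 *      IEM_STATIC VBOXSTRICTRC iemOp_ExampleImm8(PVMCPU pVCpu)
 *      {
 *          uint8_t bImm;
 *          IEM_OPCODE_GET_NEXT_U8(&bImm);   // may 'return rcStrict2' or longjmp
 *          // ... decode using bImm ...
 *          return VINF_SUCCESS;
 *      }
 */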
2241
2242#ifndef IEM_WITH_SETJMP
2243/**
2244 * Fetches the next signed byte from the opcode stream.
2245 *
2246 * @returns Strict VBox status code.
2247 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2248 * @param pi8 Where to return the signed byte.
2249 */
2250DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2251{
2252 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2253}
2254#endif /* !IEM_WITH_SETJMP */
2255
2256
2257/**
2258 * Fetches the next signed byte from the opcode stream, returning automatically
2259 * on failure.
2260 *
2261 * @param a_pi8 Where to return the signed byte.
2262 * @remark Implicitly references pVCpu.
2263 */
2264#ifndef IEM_WITH_SETJMP
2265# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2266 do \
2267 { \
2268 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2269 if (rcStrict2 != VINF_SUCCESS) \
2270 return rcStrict2; \
2271 } while (0)
2272#else /* IEM_WITH_SETJMP */
2273# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2274
2275#endif /* IEM_WITH_SETJMP */
2276
2277#ifndef IEM_WITH_SETJMP
2278
2279/**
2280 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2281 *
2282 * @returns Strict VBox status code.
2283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2284 * @param pu16 Where to return the opcode word.
2285 */
2286DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2287{
2288 uint8_t u8;
2289 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2290 if (rcStrict == VINF_SUCCESS)
2291 *pu16 = (int8_t)u8;
2292 return rcStrict;
2293}
2294
2295
2296/**
2297 * Fetches the next signed byte from the opcode stream, extending it to
2298 * unsigned 16-bit.
2299 *
2300 * @returns Strict VBox status code.
2301 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2302 * @param pu16 Where to return the unsigned word.
2303 */
2304DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2305{
2306 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2307 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2308 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2309
2310 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2311 pVCpu->iem.s.offOpcode = offOpcode + 1;
2312 return VINF_SUCCESS;
2313}
2314
2315#endif /* !IEM_WITH_SETJMP */
2316
2317/**
2318 * Fetches the next signed byte from the opcode stream, sign-extending it to
2319 * a word, returning automatically on failure.
2320 *
2321 * @param a_pu16 Where to return the word.
2322 * @remark Implicitly references pVCpu.
2323 */
2324#ifndef IEM_WITH_SETJMP
2325# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2326 do \
2327 { \
2328 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2329 if (rcStrict2 != VINF_SUCCESS) \
2330 return rcStrict2; \
2331 } while (0)
2332#else
2333# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2334#endif
2335
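/*
 * Illustrative example of the sign extension above: the byte is interpreted
 * as int8_t and then widened, so an 8-bit displacement of 0xFE (-2) ends up
 * as 0xFFFE in the 16-bit destination:
 *
 *      uint8_t  const b   = 0xFE;
 *      uint16_t const u16 = (uint16_t)(int8_t)b;   // 0xFFFE, i.e. -2 as a 16-bit value
 */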
2336#ifndef IEM_WITH_SETJMP
2337
2338/**
2339 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2340 *
2341 * @returns Strict VBox status code.
2342 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2343 * @param pu32 Where to return the opcode dword.
2344 */
2345DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2346{
2347 uint8_t u8;
2348 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2349 if (rcStrict == VINF_SUCCESS)
2350 *pu32 = (int8_t)u8;
2351 return rcStrict;
2352}
2353
2354
2355/**
2356 * Fetches the next signed byte from the opcode stream, extending it to
2357 * unsigned 32-bit.
2358 *
2359 * @returns Strict VBox status code.
2360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2361 * @param pu32 Where to return the unsigned dword.
2362 */
2363DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2364{
2365 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2366 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2367 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2368
2369 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2370 pVCpu->iem.s.offOpcode = offOpcode + 1;
2371 return VINF_SUCCESS;
2372}
2373
2374#endif /* !IEM_WITH_SETJMP */
2375
2376/**
2377 * Fetches the next signed byte from the opcode stream, sign-extending it to
2378 * a double word, returning automatically on failure.
2379 *
2380 * @param a_pu32 Where to return the double word.
2381 * @remark Implicitly references pVCpu.
2382 */
2383#ifndef IEM_WITH_SETJMP
2384#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2385 do \
2386 { \
2387 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2388 if (rcStrict2 != VINF_SUCCESS) \
2389 return rcStrict2; \
2390 } while (0)
2391#else
2392# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2393#endif
2394
2395#ifndef IEM_WITH_SETJMP
2396
2397/**
2398 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2399 *
2400 * @returns Strict VBox status code.
2401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2402 * @param pu64 Where to return the opcode qword.
2403 */
2404DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2405{
2406 uint8_t u8;
2407 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2408 if (rcStrict == VINF_SUCCESS)
2409 *pu64 = (int8_t)u8;
2410 return rcStrict;
2411}
2412
2413
2414/**
2415 * Fetches the next signed byte from the opcode stream, extending it to
2416 * unsigned 64-bit.
2417 *
2418 * @returns Strict VBox status code.
2419 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2420 * @param pu64 Where to return the unsigned qword.
2421 */
2422DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2423{
2424 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2425 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2426 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2427
2428 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2429 pVCpu->iem.s.offOpcode = offOpcode + 1;
2430 return VINF_SUCCESS;
2431}
2432
2433#endif /* !IEM_WITH_SETJMP */
2434
2435
2436/**
2437 * Fetches the next signed byte from the opcode stream, sign-extending it to
2438 * a quad word, returning automatically on failure.
2439 *
2440 * @param a_pu64 Where to return the quad word.
2441 * @remark Implicitly references pVCpu.
2442 */
2443#ifndef IEM_WITH_SETJMP
2444# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2445 do \
2446 { \
2447 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2448 if (rcStrict2 != VINF_SUCCESS) \
2449 return rcStrict2; \
2450 } while (0)
2451#else
2452# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2453#endif
2454
2455
2456#ifndef IEM_WITH_SETJMP
2457/**
2458 * Fetches the next opcode byte, noting down the ModR/M byte offset.
2459 *
2460 * @returns Strict VBox status code.
2461 * @param pVCpu The cross context virtual CPU structure of the
2462 * calling thread.
2463 * @param pu8 Where to return the opcode byte.
2464 */
2465DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2466{
2467 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2468 pVCpu->iem.s.offModRm = offOpcode;
2469 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2470 {
2471 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2472 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2473 return VINF_SUCCESS;
2474 }
2475 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2476}
2477#else /* IEM_WITH_SETJMP */
2478/**
2479 * Fetches the next opcode byte, noting down the ModR/M byte offset, longjmp on error.
2480 *
2481 * @returns The opcode byte.
2482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2483 */
2484DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2485{
2486# ifdef IEM_WITH_CODE_TLB
2487 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2488 pVCpu->iem.s.offModRm = offBuf;
2489 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2490 if (RT_LIKELY( pbBuf != NULL
2491 && offBuf < pVCpu->iem.s.cbInstrBuf))
2492 {
2493 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2494 return pbBuf[offBuf];
2495 }
2496# else
2497 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2498 pVCpu->iem.s.offModRm = offOpcode;
2499 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2500 {
2501 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2502 return pVCpu->iem.s.abOpcode[offOpcode];
2503 }
2504# endif
2505 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2506}
2507#endif /* IEM_WITH_SETJMP */
2508
2509/**
2510 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2511 * on failure.
2512 *
2513 * Will note down the position of the ModR/M byte for VT-x exits.
2514 *
2515 * @param a_pbRm Where to return the RM opcode byte.
2516 * @remark Implicitly references pVCpu.
2517 */
2518#ifndef IEM_WITH_SETJMP
2519# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2520 do \
2521 { \
2522 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2523 if (rcStrict2 == VINF_SUCCESS) \
2524 { /* likely */ } \
2525 else \
2526 return rcStrict2; \
2527 } while (0)
2528#else
2529# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2530#endif /* IEM_WITH_SETJMP */
2531
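/*
 * Illustrative note: the ModR/M byte fetched via IEM_OPCODE_GET_NEXT_RM
 * encodes three fields, and its offset is recorded (offModRm) so the VT-x
 * exit path can refer back to it.  Standard x86 field extraction:
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_RM(&bRm);
 *      uint8_t const uMod = bRm >> 6;         // 0..3: addressing mode
 *      uint8_t const uReg = (bRm >> 3) & 7;   // register / opcode extension
 *      uint8_t const uRm  = bRm & 7;          // r/m operand
 */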
2532
2533#ifndef IEM_WITH_SETJMP
2534
2535/**
2536 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2537 *
2538 * @returns Strict VBox status code.
2539 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2540 * @param pu16 Where to return the opcode word.
2541 */
2542DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2543{
2544 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2545 if (rcStrict == VINF_SUCCESS)
2546 {
2547 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2548# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2549 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2550# else
2551 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2552# endif
2553 pVCpu->iem.s.offOpcode = offOpcode + 2;
2554 }
2555 else
2556 *pu16 = 0;
2557 return rcStrict;
2558}
2559
2560
2561/**
2562 * Fetches the next opcode word.
2563 *
2564 * @returns Strict VBox status code.
2565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2566 * @param pu16 Where to return the opcode word.
2567 */
2568DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2569{
2570 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2571 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2572 {
2573 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2574# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2575 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2576# else
2577 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2578# endif
2579 return VINF_SUCCESS;
2580 }
2581 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2582}
2583
2584#else /* IEM_WITH_SETJMP */
2585
2586/**
2587 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2588 *
2589 * @returns The opcode word.
2590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2591 */
2592DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2593{
2594# ifdef IEM_WITH_CODE_TLB
2595 uint16_t u16;
2596 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2597 return u16;
2598# else
2599 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2600 if (rcStrict == VINF_SUCCESS)
2601 {
2602 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2603 pVCpu->iem.s.offOpcode += 2;
2604# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2605 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2606# else
2607 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2608# endif
2609 }
2610 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2611# endif
2612}
2613
2614
2615/**
2616 * Fetches the next opcode word, longjmp on error.
2617 *
2618 * @returns The opcode word.
2619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2620 */
2621DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2622{
2623# ifdef IEM_WITH_CODE_TLB
2624 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2625 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2626 if (RT_LIKELY( pbBuf != NULL
2627 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2628 {
2629 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2630# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2631 return *(uint16_t const *)&pbBuf[offBuf];
2632# else
2633 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2634# endif
2635 }
2636# else
2637 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2638 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2639 {
2640 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2641# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2642 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2643# else
2644 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2645# endif
2646 }
2647# endif
2648 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2649}
2650
2651#endif /* IEM_WITH_SETJMP */
2652
2653
2654/**
2655 * Fetches the next opcode word, returns automatically on failure.
2656 *
2657 * @param a_pu16 Where to return the opcode word.
2658 * @remark Implicitly references pVCpu.
2659 */
2660#ifndef IEM_WITH_SETJMP
2661# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2662 do \
2663 { \
2664 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2665 if (rcStrict2 != VINF_SUCCESS) \
2666 return rcStrict2; \
2667 } while (0)
2668#else
2669# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2670#endif
2671
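/*
 * Illustrative note: the two fetch variants above are equivalent on the
 * little-endian hosts targeted here - assembling the immediate byte by byte
 * with RT_MAKE_U16 gives the same result as the unaligned 16-bit load:
 *
 *      uint8_t  const ab[2] = { 0x34, 0x12 };              // bytes as they appear in the opcode stream
 *      uint16_t const u16a  = RT_MAKE_U16(ab[0], ab[1]);   // 0x1234 (low byte first)
 *      uint16_t const u16b  = *(uint16_t const *)&ab[0];   // 0x1234 on a little-endian host
 */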
2672#ifndef IEM_WITH_SETJMP
2673
2674/**
2675 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2676 *
2677 * @returns Strict VBox status code.
2678 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2679 * @param pu32 Where to return the opcode double word.
2680 */
2681DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2682{
2683 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2684 if (rcStrict == VINF_SUCCESS)
2685 {
2686 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2687 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2688 pVCpu->iem.s.offOpcode = offOpcode + 2;
2689 }
2690 else
2691 *pu32 = 0;
2692 return rcStrict;
2693}
2694
2695
2696/**
2697 * Fetches the next opcode word, zero extending it to a double word.
2698 *
2699 * @returns Strict VBox status code.
2700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2701 * @param pu32 Where to return the opcode double word.
2702 */
2703DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2704{
2705 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2706 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2707 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2708
2709 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2710 pVCpu->iem.s.offOpcode = offOpcode + 2;
2711 return VINF_SUCCESS;
2712}
2713
2714#endif /* !IEM_WITH_SETJMP */
2715
2716
2717/**
2718 * Fetches the next opcode word and zero extends it to a double word, returns
2719 * automatically on failure.
2720 *
2721 * @param a_pu32 Where to return the opcode double word.
2722 * @remark Implicitly references pVCpu.
2723 */
2724#ifndef IEM_WITH_SETJMP
2725# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2726 do \
2727 { \
2728 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2729 if (rcStrict2 != VINF_SUCCESS) \
2730 return rcStrict2; \
2731 } while (0)
2732#else
2733# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2734#endif
2735
2736#ifndef IEM_WITH_SETJMP
2737
2738/**
2739 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2740 *
2741 * @returns Strict VBox status code.
2742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2743 * @param pu64 Where to return the opcode quad word.
2744 */
2745DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2746{
2747 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2748 if (rcStrict == VINF_SUCCESS)
2749 {
2750 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2751 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2752 pVCpu->iem.s.offOpcode = offOpcode + 2;
2753 }
2754 else
2755 *pu64 = 0;
2756 return rcStrict;
2757}
2758
2759
2760/**
2761 * Fetches the next opcode word, zero extending it to a quad word.
2762 *
2763 * @returns Strict VBox status code.
2764 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2765 * @param pu64 Where to return the opcode quad word.
2766 */
2767DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2768{
2769 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2770 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2771 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2772
2773 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2774 pVCpu->iem.s.offOpcode = offOpcode + 2;
2775 return VINF_SUCCESS;
2776}
2777
2778#endif /* !IEM_WITH_SETJMP */
2779
2780/**
2781 * Fetches the next opcode word and zero extends it to a quad word, returns
2782 * automatically on failure.
2783 *
2784 * @param a_pu64 Where to return the opcode quad word.
2785 * @remark Implicitly references pVCpu.
2786 */
2787#ifndef IEM_WITH_SETJMP
2788# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2789 do \
2790 { \
2791 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2792 if (rcStrict2 != VINF_SUCCESS) \
2793 return rcStrict2; \
2794 } while (0)
2795#else
2796# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2797#endif
2798
2799
2800#ifndef IEM_WITH_SETJMP
2801/**
2802 * Fetches the next signed word from the opcode stream.
2803 *
2804 * @returns Strict VBox status code.
2805 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2806 * @param pi16 Where to return the signed word.
2807 */
2808DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2809{
2810 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2811}
2812#endif /* !IEM_WITH_SETJMP */
2813
2814
2815/**
2816 * Fetches the next signed word from the opcode stream, returning automatically
2817 * on failure.
2818 *
2819 * @param a_pi16 Where to return the signed word.
2820 * @remark Implicitly references pVCpu.
2821 */
2822#ifndef IEM_WITH_SETJMP
2823# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2824 do \
2825 { \
2826 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2827 if (rcStrict2 != VINF_SUCCESS) \
2828 return rcStrict2; \
2829 } while (0)
2830#else
2831# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2832#endif
2833
2834#ifndef IEM_WITH_SETJMP
2835
2836/**
2837 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2838 *
2839 * @returns Strict VBox status code.
2840 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2841 * @param pu32 Where to return the opcode dword.
2842 */
2843DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2844{
2845 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2846 if (rcStrict == VINF_SUCCESS)
2847 {
2848 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2849# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2850 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2851# else
2852 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2853 pVCpu->iem.s.abOpcode[offOpcode + 1],
2854 pVCpu->iem.s.abOpcode[offOpcode + 2],
2855 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2856# endif
2857 pVCpu->iem.s.offOpcode = offOpcode + 4;
2858 }
2859 else
2860 *pu32 = 0;
2861 return rcStrict;
2862}
2863
2864
2865/**
2866 * Fetches the next opcode dword.
2867 *
2868 * @returns Strict VBox status code.
2869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2870 * @param pu32 Where to return the opcode double word.
2871 */
2872DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2873{
2874 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2875 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2876 {
2877 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2878# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2879 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2880# else
2881 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2882 pVCpu->iem.s.abOpcode[offOpcode + 1],
2883 pVCpu->iem.s.abOpcode[offOpcode + 2],
2884 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2885# endif
2886 return VINF_SUCCESS;
2887 }
2888 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2889}
2890
2891#else /* IEM_WITH_SETJMP */
2892
2893/**
2894 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2895 *
2896 * @returns The opcode dword.
2897 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2898 */
2899DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2900{
2901# ifdef IEM_WITH_CODE_TLB
2902 uint32_t u32;
2903 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2904 return u32;
2905# else
2906 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2907 if (rcStrict == VINF_SUCCESS)
2908 {
2909 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2910 pVCpu->iem.s.offOpcode = offOpcode + 4;
2911# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2912 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2913# else
2914 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2915 pVCpu->iem.s.abOpcode[offOpcode + 1],
2916 pVCpu->iem.s.abOpcode[offOpcode + 2],
2917 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2918# endif
2919 }
2920 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2921# endif
2922}
2923
2924
2925/**
2926 * Fetches the next opcode dword, longjmp on error.
2927 *
2928 * @returns The opcode dword.
2929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2930 */
2931DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2932{
2933# ifdef IEM_WITH_CODE_TLB
2934 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2935 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2936 if (RT_LIKELY( pbBuf != NULL
2937 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2938 {
2939 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2940# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2941 return *(uint32_t const *)&pbBuf[offBuf];
2942# else
2943 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2944 pbBuf[offBuf + 1],
2945 pbBuf[offBuf + 2],
2946 pbBuf[offBuf + 3]);
2947# endif
2948 }
2949# else
2950 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2951 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2952 {
2953 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2954# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2955 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2956# else
2957 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2958 pVCpu->iem.s.abOpcode[offOpcode + 1],
2959 pVCpu->iem.s.abOpcode[offOpcode + 2],
2960 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2961# endif
2962 }
2963# endif
2964 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2965}
2966
2967#endif /* !IEM_WITH_SETJMP */
2968
2969
2970/**
2971 * Fetches the next opcode dword, returns automatically on failure.
2972 *
2973 * @param a_pu32 Where to return the opcode dword.
2974 * @remark Implicitly references pVCpu.
2975 */
2976#ifndef IEM_WITH_SETJMP
2977# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2978 do \
2979 { \
2980 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2981 if (rcStrict2 != VINF_SUCCESS) \
2982 return rcStrict2; \
2983 } while (0)
2984#else
2985# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2986#endif
2987
2988#ifndef IEM_WITH_SETJMP
2989
2990/**
2991 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2992 *
2993 * @returns Strict VBox status code.
2994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2995 * @param pu64 Where to return the opcode dword.
2996 */
2997DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2998{
2999 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3000 if (rcStrict == VINF_SUCCESS)
3001 {
3002 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3003 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3004 pVCpu->iem.s.abOpcode[offOpcode + 1],
3005 pVCpu->iem.s.abOpcode[offOpcode + 2],
3006 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3007 pVCpu->iem.s.offOpcode = offOpcode + 4;
3008 }
3009 else
3010 *pu64 = 0;
3011 return rcStrict;
3012}
3013
3014
3015/**
3016 * Fetches the next opcode dword, zero extending it to a quad word.
3017 *
3018 * @returns Strict VBox status code.
3019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3020 * @param pu64 Where to return the opcode quad word.
3021 */
3022DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3023{
3024 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3025 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3026 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3027
3028 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3029 pVCpu->iem.s.abOpcode[offOpcode + 1],
3030 pVCpu->iem.s.abOpcode[offOpcode + 2],
3031 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3032 pVCpu->iem.s.offOpcode = offOpcode + 4;
3033 return VINF_SUCCESS;
3034}
3035
3036#endif /* !IEM_WITH_SETJMP */
3037
3038
3039/**
3040 * Fetches the next opcode dword and zero extends it to a quad word, returns
3041 * automatically on failure.
3042 *
3043 * @param a_pu64 Where to return the opcode quad word.
3044 * @remark Implicitly references pVCpu.
3045 */
3046#ifndef IEM_WITH_SETJMP
3047# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3048 do \
3049 { \
3050 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3051 if (rcStrict2 != VINF_SUCCESS) \
3052 return rcStrict2; \
3053 } while (0)
3054#else
3055# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3056#endif
3057
3058
3059#ifndef IEM_WITH_SETJMP
3060/**
3061 * Fetches the next signed double word from the opcode stream.
3062 *
3063 * @returns Strict VBox status code.
3064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3065 * @param pi32 Where to return the signed double word.
3066 */
3067DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3068{
3069 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3070}
3071#endif
3072
3073/**
3074 * Fetches the next signed double word from the opcode stream, returning
3075 * automatically on failure.
3076 *
3077 * @param a_pi32 Where to return the signed double word.
3078 * @remark Implicitly references pVCpu.
3079 */
3080#ifndef IEM_WITH_SETJMP
3081# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3082 do \
3083 { \
3084 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3085 if (rcStrict2 != VINF_SUCCESS) \
3086 return rcStrict2; \
3087 } while (0)
3088#else
3089# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3090#endif
3091
3092#ifndef IEM_WITH_SETJMP
3093
3094/**
3095 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3096 *
3097 * @returns Strict VBox status code.
3098 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3099 * @param pu64 Where to return the opcode qword.
3100 */
3101DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3102{
3103 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3104 if (rcStrict == VINF_SUCCESS)
3105 {
3106 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3107 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3108 pVCpu->iem.s.abOpcode[offOpcode + 1],
3109 pVCpu->iem.s.abOpcode[offOpcode + 2],
3110 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3111 pVCpu->iem.s.offOpcode = offOpcode + 4;
3112 }
3113 else
3114 *pu64 = 0;
3115 return rcStrict;
3116}
3117
3118
3119/**
3120 * Fetches the next opcode dword, sign extending it into a quad word.
3121 *
3122 * @returns Strict VBox status code.
3123 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3124 * @param pu64 Where to return the opcode quad word.
3125 */
3126DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3127{
3128 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3129 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3130 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3131
3132 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3133 pVCpu->iem.s.abOpcode[offOpcode + 1],
3134 pVCpu->iem.s.abOpcode[offOpcode + 2],
3135 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3136 *pu64 = i32;
3137 pVCpu->iem.s.offOpcode = offOpcode + 4;
3138 return VINF_SUCCESS;
3139}
3140
3141#endif /* !IEM_WITH_SETJMP */
3142
3143
3144/**
3145 * Fetches the next opcode double word and sign extends it to a quad word,
3146 * returns automatically on failure.
3147 *
3148 * @param a_pu64 Where to return the opcode quad word.
3149 * @remark Implicitly references pVCpu.
3150 */
3151#ifndef IEM_WITH_SETJMP
3152# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3153 do \
3154 { \
3155 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3156 if (rcStrict2 != VINF_SUCCESS) \
3157 return rcStrict2; \
3158 } while (0)
3159#else
3160# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3161#endif
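
/*
 * Worked example (informational only): assuming the next four opcode bytes are
 * FF FF FF FF, the zero extending and sign extending fetches differ as follows:
 *
 *     uint64_t u64Zx, u64Sx;
 *     IEM_OPCODE_GET_NEXT_U32_ZX_U64(&u64Zx);   // u64Zx == UINT64_C(0x00000000ffffffff)
 *     IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Sx);   // u64Sx == UINT64_C(0xffffffffffffffff)
 *
 * Each invocation consumes four opcode bytes of its own; the values shown assume both
 * reads see FF FF FF FF.
 */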
3162
3163#ifndef IEM_WITH_SETJMP
3164
3165/**
3166 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3167 *
3168 * @returns Strict VBox status code.
3169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3170 * @param pu64 Where to return the opcode qword.
3171 */
3172DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3173{
3174 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3175 if (rcStrict == VINF_SUCCESS)
3176 {
3177 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3178# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3179 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3180# else
3181 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3182 pVCpu->iem.s.abOpcode[offOpcode + 1],
3183 pVCpu->iem.s.abOpcode[offOpcode + 2],
3184 pVCpu->iem.s.abOpcode[offOpcode + 3],
3185 pVCpu->iem.s.abOpcode[offOpcode + 4],
3186 pVCpu->iem.s.abOpcode[offOpcode + 5],
3187 pVCpu->iem.s.abOpcode[offOpcode + 6],
3188 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3189# endif
3190 pVCpu->iem.s.offOpcode = offOpcode + 8;
3191 }
3192 else
3193 *pu64 = 0;
3194 return rcStrict;
3195}
3196
3197
3198/**
3199 * Fetches the next opcode qword.
3200 *
3201 * @returns Strict VBox status code.
3202 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3203 * @param pu64 Where to return the opcode qword.
3204 */
3205DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3206{
3207 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3208 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3209 {
3210# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3211 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3212# else
3213 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3214 pVCpu->iem.s.abOpcode[offOpcode + 1],
3215 pVCpu->iem.s.abOpcode[offOpcode + 2],
3216 pVCpu->iem.s.abOpcode[offOpcode + 3],
3217 pVCpu->iem.s.abOpcode[offOpcode + 4],
3218 pVCpu->iem.s.abOpcode[offOpcode + 5],
3219 pVCpu->iem.s.abOpcode[offOpcode + 6],
3220 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3221# endif
3222 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3223 return VINF_SUCCESS;
3224 }
3225 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3226}
3227
3228#else /* IEM_WITH_SETJMP */
3229
3230/**
3231 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3232 *
3233 * @returns The opcode qword.
3234 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3235 */
3236DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3237{
3238# ifdef IEM_WITH_CODE_TLB
3239 uint64_t u64;
3240 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3241 return u64;
3242# else
3243 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3244 if (rcStrict == VINF_SUCCESS)
3245 {
3246 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3247 pVCpu->iem.s.offOpcode = offOpcode + 8;
3248# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3249 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3250# else
3251 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3252 pVCpu->iem.s.abOpcode[offOpcode + 1],
3253 pVCpu->iem.s.abOpcode[offOpcode + 2],
3254 pVCpu->iem.s.abOpcode[offOpcode + 3],
3255 pVCpu->iem.s.abOpcode[offOpcode + 4],
3256 pVCpu->iem.s.abOpcode[offOpcode + 5],
3257 pVCpu->iem.s.abOpcode[offOpcode + 6],
3258 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3259# endif
3260 }
3261 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3262# endif
3263}
3264
3265
3266/**
3267 * Fetches the next opcode qword, longjmp on error.
3268 *
3269 * @returns The opcode qword.
3270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3271 */
3272DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3273{
3274# ifdef IEM_WITH_CODE_TLB
3275 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3276 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3277 if (RT_LIKELY( pbBuf != NULL
3278 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3279 {
3280 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3281# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3282 return *(uint64_t const *)&pbBuf[offBuf];
3283# else
3284 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3285 pbBuf[offBuf + 1],
3286 pbBuf[offBuf + 2],
3287 pbBuf[offBuf + 3],
3288 pbBuf[offBuf + 4],
3289 pbBuf[offBuf + 5],
3290 pbBuf[offBuf + 6],
3291 pbBuf[offBuf + 7]);
3292# endif
3293 }
3294# else
3295 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3296 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3297 {
3298 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3299# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3300 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3301# else
3302 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3303 pVCpu->iem.s.abOpcode[offOpcode + 1],
3304 pVCpu->iem.s.abOpcode[offOpcode + 2],
3305 pVCpu->iem.s.abOpcode[offOpcode + 3],
3306 pVCpu->iem.s.abOpcode[offOpcode + 4],
3307 pVCpu->iem.s.abOpcode[offOpcode + 5],
3308 pVCpu->iem.s.abOpcode[offOpcode + 6],
3309 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3310# endif
3311 }
3312# endif
3313 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3314}
3315
3316#endif /* IEM_WITH_SETJMP */
3317
3318/**
3319 * Fetches the next opcode quad word, returns automatically on failure.
3320 *
3321 * @param a_pu64 Where to return the opcode quad word.
3322 * @remark Implicitly references pVCpu.
3323 */
3324#ifndef IEM_WITH_SETJMP
3325# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3326 do \
3327 { \
3328 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3329 if (rcStrict2 != VINF_SUCCESS) \
3330 return rcStrict2; \
3331 } while (0)
3332#else
3333# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3334#endif
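
/*
 * Illustrative sketch (not compiled): typical use of the fetch macros inside a decoder
 * function.  The variable name is a placeholder; only the macro is real.  On a fetch
 * failure the non-setjmp variant returns the strict status code from the enclosing
 * function, while the setjmp variant longjmps, so the call site needs no explicit
 * error check:
 *
 *     uint64_t u64Imm = 0;                      // e.g. the imm64 of MOV r64, imm64
 *     IEM_OPCODE_GET_NEXT_U64(&u64Imm);         // next 8 opcode bytes, little endian
 */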
3335
3336
3337/** @name Misc Worker Functions.
3338 * @{
3339 */
3340
3341/**
3342 * Gets the exception class for the specified exception vector.
3343 *
3344 * @returns The class of the specified exception.
3345 * @param uVector The exception vector.
3346 */
3347IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3348{
3349 Assert(uVector <= X86_XCPT_LAST);
3350 switch (uVector)
3351 {
3352 case X86_XCPT_DE:
3353 case X86_XCPT_TS:
3354 case X86_XCPT_NP:
3355 case X86_XCPT_SS:
3356 case X86_XCPT_GP:
3357 case X86_XCPT_SX: /* AMD only */
3358 return IEMXCPTCLASS_CONTRIBUTORY;
3359
3360 case X86_XCPT_PF:
3361 case X86_XCPT_VE: /* Intel only */
3362 return IEMXCPTCLASS_PAGE_FAULT;
3363
3364 case X86_XCPT_DF:
3365 return IEMXCPTCLASS_DOUBLE_FAULT;
3366 }
3367 return IEMXCPTCLASS_BENIGN;
3368}
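
/*
 * Classification examples (informational only): #GP and #TS are contributory, #PF falls
 * in the page fault class, #DF is the double fault class, while e.g. #DB, #BP, #UD and
 * #NM end up as benign.  These class pairings drive the double/triple fault escalation
 * implemented by IEMEvaluateRecursiveXcpt() below.
 */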
3369
3370
3371/**
3372 * Evaluates how to handle an exception caused during delivery of another event
3373 * (exception / interrupt).
3374 *
3375 * @returns How to handle the recursive exception.
3376 * @param pVCpu The cross context virtual CPU structure of the
3377 * calling thread.
3378 * @param fPrevFlags The flags of the previous event.
3379 * @param uPrevVector The vector of the previous event.
3380 * @param fCurFlags The flags of the current exception.
3381 * @param uCurVector The vector of the current exception.
3382 * @param pfXcptRaiseInfo Where to store additional information about the
3383 * exception condition. Optional.
3384 */
3385VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3386 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3387{
3388 /*
3389 * Only CPU exceptions can be raised while delivering other events; software interrupt
3390 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3391 */
3392 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3393 Assert(pVCpu); RT_NOREF(pVCpu);
3394 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3395
3396 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3397 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3398 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3399 {
3400 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3401 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3402 {
3403 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3404 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3405 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3406 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3407 {
3408 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3409 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3410 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3411 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3412 uCurVector, pVCpu->cpum.GstCtx.cr2));
3413 }
3414 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3415 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3416 {
3417 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3418 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3419 }
3420 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3421 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3422 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3423 {
3424 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3425 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3426 }
3427 }
3428 else
3429 {
3430 if (uPrevVector == X86_XCPT_NMI)
3431 {
3432 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3433 if (uCurVector == X86_XCPT_PF)
3434 {
3435 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3436 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3437 }
3438 }
3439 else if ( uPrevVector == X86_XCPT_AC
3440 && uCurVector == X86_XCPT_AC)
3441 {
3442 enmRaise = IEMXCPTRAISE_CPU_HANG;
3443 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3444 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3445 }
3446 }
3447 }
3448 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3449 {
3450 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3451 if (uCurVector == X86_XCPT_PF)
3452 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3453 }
3454 else
3455 {
3456 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3457 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3458 }
3459
3460 if (pfXcptRaiseInfo)
3461 *pfXcptRaiseInfo = fRaiseInfo;
3462 return enmRaise;
3463}
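
/*
 * Worked examples (informational only), following the Intel/AMD exception class tables:
 *   - #GP raised while delivering #PF -> IEMXCPTRAISE_DOUBLE_FAULT
 *     (fRaiseInfo = IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT).
 *   - #PF raised while delivering #PF -> IEMXCPTRAISE_DOUBLE_FAULT (IEMXCPTRAISEINFO_PF_PF).
 *   - #GP raised while delivering #DF -> IEMXCPTRAISE_TRIPLE_FAULT.
 *   - #DB raised while delivering #GP -> IEMXCPTRAISE_CURRENT_XCPT (benign second exception).
 *   - #AC raised while delivering #AC -> IEMXCPTRAISE_CPU_HANG (IEMXCPTRAISEINFO_AC_AC).
 */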
3464
3465
3466/**
3467 * Enters the CPU shutdown state initiated by a triple fault or other
3468 * unrecoverable conditions.
3469 *
3470 * @returns Strict VBox status code.
3471 * @param pVCpu The cross context virtual CPU structure of the
3472 * calling thread.
3473 */
3474IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3475{
3476 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3477 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu);
3478
3479 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3480 {
3481 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3482 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3483 }
3484
3485 RT_NOREF(pVCpu);
3486 return VINF_EM_TRIPLE_FAULT;
3487}
3488
3489
3490/**
3491 * Validates a new SS segment.
3492 *
3493 * @returns VBox strict status code.
3494 * @param pVCpu The cross context virtual CPU structure of the
3495 * calling thread.
3496 * @param NewSS The new SS selector.
3497 * @param uCpl The CPL to load the stack for.
3498 * @param pDesc Where to return the descriptor.
3499 */
3500IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3501{
3502 /* Null selectors are not allowed (we're not called for dispatching
3503 interrupts with SS=0 in long mode). */
3504 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3505 {
3506 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3507 return iemRaiseTaskSwitchFault0(pVCpu);
3508 }
3509
3510 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3511 if ((NewSS & X86_SEL_RPL) != uCpl)
3512 {
3513 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3514 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3515 }
3516
3517 /*
3518 * Read the descriptor.
3519 */
3520 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3521 if (rcStrict != VINF_SUCCESS)
3522 return rcStrict;
3523
3524 /*
3525 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3526 */
3527 if (!pDesc->Legacy.Gen.u1DescType)
3528 {
3529 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3530 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3531 }
3532
3533 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3534 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3535 {
3536 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3537 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3538 }
3539 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3540 {
3541 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3542 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3543 }
3544
3545 /* Is it there? */
3546 /** @todo testcase: Is this checked before the canonical / limit check below? */
3547 if (!pDesc->Legacy.Gen.u1Present)
3548 {
3549 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3550 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3551 }
3552
3553 return VINF_SUCCESS;
3554}
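
/*
 * Example outcomes (informational only): a null SS raises #TS(0); an SS whose RPL
 * differs from the target CPL, a system/code/read-only descriptor, or a DPL != CPL
 * descriptor raises #TS with the selector as error code; a descriptor that passes all
 * of the above but is not present raises #NP with the selector as error code.
 */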
3555
3556
3557/**
3558 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3559 * not.
3560 *
3561 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3562 */
3563#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3564# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3565#else
3566# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3567#endif
3568
3569/**
3570 * Updates the EFLAGS in the correct manner wrt. PATM.
3571 *
3572 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3573 * @param a_fEfl The new EFLAGS.
3574 */
3575#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3576# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3577#else
3578# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3579#endif
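
/*
 * Illustrative sketch (not compiled): the usual read-modify-write pattern with these
 * macros, mirroring what the real-mode exception dispatcher below does when masking
 * IF, TF and AC:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 *     fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
 *     IEMMISC_SET_EFL(pVCpu, fEfl);
 */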
3580
3581
3582/** @} */
3583
3584/** @name Raising Exceptions.
3585 *
3586 * @{
3587 */
3588
3589
3590/**
3591 * Loads the specified stack far pointer from the TSS.
3592 *
3593 * @returns VBox strict status code.
3594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3595 * @param uCpl The CPL to load the stack for.
3596 * @param pSelSS Where to return the new stack segment.
3597 * @param puEsp Where to return the new stack pointer.
3598 */
3599IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3600{
3601 VBOXSTRICTRC rcStrict;
3602 Assert(uCpl < 4);
3603
3604 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3605 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3606 {
3607 /*
3608 * 16-bit TSS (X86TSS16).
3609 */
3610 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3611 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3612 {
3613 uint32_t off = uCpl * 4 + 2;
3614 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3615 {
3616 /** @todo check actual access pattern here. */
3617 uint32_t u32Tmp = 0; /* gcc maybe... */
3618 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3619 if (rcStrict == VINF_SUCCESS)
3620 {
3621 *puEsp = RT_LOWORD(u32Tmp);
3622 *pSelSS = RT_HIWORD(u32Tmp);
3623 return VINF_SUCCESS;
3624 }
3625 }
3626 else
3627 {
3628 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3629 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3630 }
3631 break;
3632 }
3633
3634 /*
3635 * 32-bit TSS (X86TSS32).
3636 */
3637 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3638 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3639 {
3640 uint32_t off = uCpl * 8 + 4;
3641 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3642 {
3643/** @todo check actual access pattern here. */
3644 uint64_t u64Tmp;
3645 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3646 if (rcStrict == VINF_SUCCESS)
3647 {
3648 *puEsp = u64Tmp & UINT32_MAX;
3649 *pSelSS = (RTSEL)(u64Tmp >> 32);
3650 return VINF_SUCCESS;
3651 }
3652 }
3653 else
3654 {
3655 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3656 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3657 }
3658 break;
3659 }
3660
3661 default:
3662 AssertFailed();
3663 rcStrict = VERR_IEM_IPE_4;
3664 break;
3665 }
3666
3667 *puEsp = 0; /* make gcc happy */
3668 *pSelSS = 0; /* make gcc happy */
3669 return rcStrict;
3670}
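
/*
 * Worked example (informational only): for a 32-bit TSS and uCpl=2 this reads the 8
 * bytes at TR.base + 2*8+4 = TR.base + 0x14, i.e. esp2 followed by ss2, returning ESP
 * in the low dword and SS in the word above it.  For a 16-bit TSS and uCpl=1 it reads
 * the 4 bytes at TR.base + 1*4+2 = TR.base + 0x06, i.e. sp1 in the low word and ss1 in
 * the high word.
 */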
3671
3672
3673/**
3674 * Loads the specified stack pointer from the 64-bit TSS.
3675 *
3676 * @returns VBox strict status code.
3677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3678 * @param uCpl The CPL to load the stack for.
3679 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3680 * @param puRsp Where to return the new stack pointer.
3681 */
3682IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3683{
3684 Assert(uCpl < 4);
3685 Assert(uIst < 8);
3686 *puRsp = 0; /* make gcc happy */
3687
3688 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3689 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3690
3691 uint32_t off;
3692 if (uIst)
3693 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3694 else
3695 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3696 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3697 {
3698 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3699 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3700 }
3701
3702 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3703}
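
/*
 * Worked example (informational only): with uIst=0 and uCpl=1 this reads the 8 bytes at
 * TR.base + RT_UOFFSETOF(X86TSS64, rsp0) + 1*8, i.e. rsp1; with uIst=2 it reads
 * TR.base + RT_UOFFSETOF(X86TSS64, ist1) + (2-1)*8, i.e. IST2, regardless of uCpl.
 */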
3704
3705
3706/**
3707 * Adjusts the CPU state according to the exception being raised.
3708 *
3709 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3710 * @param u8Vector The exception that has been raised.
3711 */
3712DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3713{
3714 switch (u8Vector)
3715 {
3716 case X86_XCPT_DB:
3717 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3718 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3719 break;
3720 /** @todo Read the AMD and Intel exception reference... */
3721 }
3722}
3723
3724
3725/**
3726 * Implements exceptions and interrupts for real mode.
3727 *
3728 * @returns VBox strict status code.
3729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3730 * @param cbInstr The number of bytes to offset rIP by in the return
3731 * address.
3732 * @param u8Vector The interrupt / exception vector number.
3733 * @param fFlags The flags.
3734 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3735 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3736 */
3737IEM_STATIC VBOXSTRICTRC
3738iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3739 uint8_t cbInstr,
3740 uint8_t u8Vector,
3741 uint32_t fFlags,
3742 uint16_t uErr,
3743 uint64_t uCr2)
3744{
3745 NOREF(uErr); NOREF(uCr2);
3746 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3747
3748 /*
3749 * Read the IDT entry.
3750 */
3751 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3752 {
3753 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3754 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3755 }
3756 RTFAR16 Idte;
3757 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3758 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3759 {
3760 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3761 return rcStrict;
3762 }
3763
3764 /*
3765 * Push the stack frame.
3766 */
3767 uint16_t *pu16Frame;
3768 uint64_t uNewRsp;
3769 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3770 if (rcStrict != VINF_SUCCESS)
3771 return rcStrict;
3772
3773 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3774#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3775 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3776 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3777 fEfl |= UINT16_C(0xf000);
3778#endif
3779 pu16Frame[2] = (uint16_t)fEfl;
3780 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3781 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3782 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3783 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3784 return rcStrict;
3785
3786 /*
3787 * Load the vector address into cs:ip and make exception specific state
3788 * adjustments.
3789 */
3790 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3791 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3792 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3793 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3794 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3795 pVCpu->cpum.GstCtx.rip = Idte.off;
3796 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3797 IEMMISC_SET_EFL(pVCpu, fEfl);
3798
3799 /** @todo do we actually do this in real mode? */
3800 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3801 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3802
3803 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3804}
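
/*
 * Worked example (informational only): dispatching vector 8 in real mode fetches the
 * 4-byte IVT entry at IDTR.base + 8*4 = IDTR.base + 0x20 (new IP in the low word, new
 * CS in the high word), pushes FLAGS, CS and the return IP (6 bytes), loads CS:IP from
 * the entry with CS.base = CS.Sel << 4, and clears IF, TF and AC.
 */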
3805
3806
3807/**
3808 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3809 *
3810 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3811 * @param pSReg Pointer to the segment register.
3812 */
3813IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3814{
3815 pSReg->Sel = 0;
3816 pSReg->ValidSel = 0;
3817 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3818 {
3819 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3820 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3821 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3822 }
3823 else
3824 {
3825 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3826 /** @todo check this on AMD-V */
3827 pSReg->u64Base = 0;
3828 pSReg->u32Limit = 0;
3829 }
3830}
3831
3832
3833/**
3834 * Loads a segment selector during a task switch in V8086 mode.
3835 *
3836 * @param pSReg Pointer to the segment register.
3837 * @param uSel The selector value to load.
3838 */
3839IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3840{
3841 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3842 pSReg->Sel = uSel;
3843 pSReg->ValidSel = uSel;
3844 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3845 pSReg->u64Base = uSel << 4;
3846 pSReg->u32Limit = 0xffff;
3847 pSReg->Attr.u = 0xf3;
3848}
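
/*
 * Worked example (informational only): loading uSel=0x1234 here yields u64Base=0x12340
 * (selector << 4), u32Limit=0xffff and Attr=0xf3 (present, DPL=3, accessed read/write
 * data segment), matching the V8086 segment model.
 */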
3849
3850
3851/**
3852 * Loads a NULL data selector into a selector register, both the hidden and
3853 * visible parts, in protected mode.
3854 *
3855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3856 * @param pSReg Pointer to the segment register.
3857 * @param uRpl The RPL.
3858 */
3859IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3860{
3861 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3862 * data selector in protected mode. */
3863 pSReg->Sel = uRpl;
3864 pSReg->ValidSel = uRpl;
3865 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3866 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3867 {
3868 /* VT-x (Intel 3960x) observed doing something like this. */
3869 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3870 pSReg->u32Limit = UINT32_MAX;
3871 pSReg->u64Base = 0;
3872 }
3873 else
3874 {
3875 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3876 pSReg->u32Limit = 0;
3877 pSReg->u64Base = 0;
3878 }
3879}
3880
3881
3882/**
3883 * Loads a segment selector during a task switch in protected mode.
3884 *
3885 * In this task switch scenario, we would throw \#TS exceptions rather than
3886 * \#GPs.
3887 *
3888 * @returns VBox strict status code.
3889 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3890 * @param pSReg Pointer to the segment register.
3891 * @param uSel The new selector value.
3892 *
3893 * @remarks This does _not_ handle CS or SS.
3894 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3895 */
3896IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3897{
3898 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3899
3900 /* Null data selector. */
3901 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3902 {
3903 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3904 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3905 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3906 return VINF_SUCCESS;
3907 }
3908
3909 /* Fetch the descriptor. */
3910 IEMSELDESC Desc;
3911 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3912 if (rcStrict != VINF_SUCCESS)
3913 {
3914 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3915 VBOXSTRICTRC_VAL(rcStrict)));
3916 return rcStrict;
3917 }
3918
3919 /* Must be a data segment or readable code segment. */
3920 if ( !Desc.Legacy.Gen.u1DescType
3921 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3922 {
3923 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3924 Desc.Legacy.Gen.u4Type));
3925 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3926 }
3927
3928 /* Check privileges for data segments and non-conforming code segments. */
3929 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3930 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3931 {
3932 /* The RPL and the new CPL must be less than or equal to the DPL. */
3933 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3934 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3935 {
3936 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3937 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3938 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3939 }
3940 }
3941
3942 /* Is it there? */
3943 if (!Desc.Legacy.Gen.u1Present)
3944 {
3945 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3946 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3947 }
3948
3949 /* The base and limit. */
3950 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3951 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3952
3953 /*
3954 * Ok, everything checked out fine. Now set the accessed bit before
3955 * committing the result into the registers.
3956 */
3957 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3958 {
3959 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3960 if (rcStrict != VINF_SUCCESS)
3961 return rcStrict;
3962 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3963 }
3964
3965 /* Commit */
3966 pSReg->Sel = uSel;
3967 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3968 pSReg->u32Limit = cbLimit;
3969 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3970 pSReg->ValidSel = uSel;
3971 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3972 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3973 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3974
3975 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3976 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3977 return VINF_SUCCESS;
3978}
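
/*
 * Example (informational only): loading DS=0x002b (RPL=3) when the new CPL is 3
 * succeeds only if the descriptor is a data or readable code segment that is present
 * and, unless it is conforming code, has DPL >= 3; otherwise #TS(0x0028) is raised,
 * or #NP(0x0028) if the descriptor is not present, the RPL bits being masked off the
 * selector in the error code.
 */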
3979
3980
3981/**
3982 * Performs a task switch.
3983 *
3984 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3985 * caller is responsible for performing the necessary checks (like DPL, TSS
3986 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3987 * reference for JMP, CALL, IRET.
3988 *
3989 * If the task switch is due to a software interrupt or hardware exception,
3990 * the caller is responsible for validating the TSS selector and descriptor. See
3991 * Intel Instruction reference for INT n.
3992 *
3993 * @returns VBox strict status code.
3994 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3995 * @param enmTaskSwitch The cause of the task switch.
3996 * @param uNextEip The EIP effective after the task switch.
3997 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3998 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3999 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4000 * @param SelTSS The TSS selector of the new task.
4001 * @param pNewDescTSS Pointer to the new TSS descriptor.
4002 */
4003IEM_STATIC VBOXSTRICTRC
4004iemTaskSwitch(PVMCPU pVCpu,
4005 IEMTASKSWITCH enmTaskSwitch,
4006 uint32_t uNextEip,
4007 uint32_t fFlags,
4008 uint16_t uErr,
4009 uint64_t uCr2,
4010 RTSEL SelTSS,
4011 PIEMSELDESC pNewDescTSS)
4012{
4013 Assert(!IEM_IS_REAL_MODE(pVCpu));
4014 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4015 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4016
4017 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4018 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4019 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4020 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4021 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4022
4023 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4024 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4025
4026 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4027 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4028
4029 /* Update CR2 in case it's a page-fault. */
4030 /** @todo This should probably be done much earlier in IEM/PGM. See
4031 * @bugref{5653#c49}. */
4032 if (fFlags & IEM_XCPT_FLAGS_CR2)
4033 pVCpu->cpum.GstCtx.cr2 = uCr2;
4034
4035 /*
4036 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4037 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4038 */
4039 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4040 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4041 if (uNewTSSLimit < uNewTSSLimitMin)
4042 {
4043 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4044 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4045 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4046 }
4047
4048 /*
4049 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4050 * The new TSS must have been read and validated (DPL, limits etc.) before a
4051 * task-switch VM-exit commences.
4052 *
4053 * See Intel spec. 25.4.2 "Treatment of Task Switches".
4054 */
4055 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4056 {
4057 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4058 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4059 }
4060
4061 /*
4062 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4063 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4064 */
4065 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4066 {
4067 uint32_t const uExitInfo1 = SelTSS;
4068 uint32_t uExitInfo2 = uErr;
4069 switch (enmTaskSwitch)
4070 {
4071 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4072 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4073 default: break;
4074 }
4075 if (fFlags & IEM_XCPT_FLAGS_ERR)
4076 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4077 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4078 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4079
4080 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4081 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4082 RT_NOREF2(uExitInfo1, uExitInfo2);
4083 }
4084
4085 /*
4086 * Check the current TSS limit. The last write to the current TSS during the
4087 * task switch is 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4088 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4089 *
4090 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4091 * end up with smaller than "legal" TSS limits.
4092 */
4093 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4094 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4095 if (uCurTSSLimit < uCurTSSLimitMin)
4096 {
4097 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4098 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4099 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4100 }
4101
4102 /*
4103 * Verify that the new TSS can be accessed and map it. Map only the required contents
4104 * and not the entire TSS.
4105 */
4106 void *pvNewTSS;
4107 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4108 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4109 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4110 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4111 * not perform correct translation if this happens. See Intel spec. 7.2.1
4112 * "Task-State Segment" */
4113 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4114 if (rcStrict != VINF_SUCCESS)
4115 {
4116 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4117 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4118 return rcStrict;
4119 }
4120
4121 /*
4122 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4123 */
4124 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4125 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4126 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4127 {
4128 PX86DESC pDescCurTSS;
4129 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4130 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4131 if (rcStrict != VINF_SUCCESS)
4132 {
4133 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4134 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4135 return rcStrict;
4136 }
4137
4138 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4139 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4140 if (rcStrict != VINF_SUCCESS)
4141 {
4142 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4143 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4144 return rcStrict;
4145 }
4146
4147 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4148 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4149 {
4150 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4151 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4152 u32EFlags &= ~X86_EFL_NT;
4153 }
4154 }
4155
4156 /*
4157 * Save the CPU state into the current TSS.
4158 */
4159 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4160 if (GCPtrNewTSS == GCPtrCurTSS)
4161 {
4162 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4163 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4164 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4165 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4166 pVCpu->cpum.GstCtx.ldtr.Sel));
4167 }
4168 if (fIsNewTSS386)
4169 {
4170 /*
4171 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4172 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4173 */
4174 void *pvCurTSS32;
4175 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4176 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4177 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4178 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4179 if (rcStrict != VINF_SUCCESS)
4180 {
4181 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4182 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4183 return rcStrict;
4184 }
4185
4186 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4187 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4188 pCurTSS32->eip = uNextEip;
4189 pCurTSS32->eflags = u32EFlags;
4190 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4191 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4192 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4193 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4194 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4195 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4196 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4197 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4198 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4199 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4200 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4201 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4202 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4203 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4204
4205 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4206 if (rcStrict != VINF_SUCCESS)
4207 {
4208 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4209 VBOXSTRICTRC_VAL(rcStrict)));
4210 return rcStrict;
4211 }
4212 }
4213 else
4214 {
4215 /*
4216 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4217 */
4218 void *pvCurTSS16;
4219 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4220 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4221 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4222 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4223 if (rcStrict != VINF_SUCCESS)
4224 {
4225 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4226 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4227 return rcStrict;
4228 }
4229
4230 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4231 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4232 pCurTSS16->ip = uNextEip;
4233 pCurTSS16->flags = u32EFlags;
4234 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4235 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4236 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4237 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4238 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4239 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4240 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4241 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4242 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4243 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4244 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4245 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4246
4247 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4248 if (rcStrict != VINF_SUCCESS)
4249 {
4250 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4251 VBOXSTRICTRC_VAL(rcStrict)));
4252 return rcStrict;
4253 }
4254 }
4255
4256 /*
4257 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4258 */
4259 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4260 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4261 {
4262 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4263 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4264 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4265 }
4266
4267 /*
4268 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4269 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4270 */
4271 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4272 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4273 bool fNewDebugTrap;
4274 if (fIsNewTSS386)
4275 {
4276 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4277 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4278 uNewEip = pNewTSS32->eip;
4279 uNewEflags = pNewTSS32->eflags;
4280 uNewEax = pNewTSS32->eax;
4281 uNewEcx = pNewTSS32->ecx;
4282 uNewEdx = pNewTSS32->edx;
4283 uNewEbx = pNewTSS32->ebx;
4284 uNewEsp = pNewTSS32->esp;
4285 uNewEbp = pNewTSS32->ebp;
4286 uNewEsi = pNewTSS32->esi;
4287 uNewEdi = pNewTSS32->edi;
4288 uNewES = pNewTSS32->es;
4289 uNewCS = pNewTSS32->cs;
4290 uNewSS = pNewTSS32->ss;
4291 uNewDS = pNewTSS32->ds;
4292 uNewFS = pNewTSS32->fs;
4293 uNewGS = pNewTSS32->gs;
4294 uNewLdt = pNewTSS32->selLdt;
4295 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4296 }
4297 else
4298 {
4299 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4300 uNewCr3 = 0;
4301 uNewEip = pNewTSS16->ip;
4302 uNewEflags = pNewTSS16->flags;
4303 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4304 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4305 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4306 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4307 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4308 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4309 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4310 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4311 uNewES = pNewTSS16->es;
4312 uNewCS = pNewTSS16->cs;
4313 uNewSS = pNewTSS16->ss;
4314 uNewDS = pNewTSS16->ds;
4315 uNewFS = 0;
4316 uNewGS = 0;
4317 uNewLdt = pNewTSS16->selLdt;
4318 fNewDebugTrap = false;
4319 }
4320
4321 if (GCPtrNewTSS == GCPtrCurTSS)
4322 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4323 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4324
4325 /*
4326 * We're done accessing the new TSS.
4327 */
4328 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4329 if (rcStrict != VINF_SUCCESS)
4330 {
4331 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4332 return rcStrict;
4333 }
4334
4335 /*
4336 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4337 */
4338 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4339 {
4340 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4341 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4342 if (rcStrict != VINF_SUCCESS)
4343 {
4344 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4345 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4346 return rcStrict;
4347 }
4348
4349 /* Check that the descriptor indicates the new TSS is available (not busy). */
4350 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4351 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4352 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4353
4354 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4355 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4356 if (rcStrict != VINF_SUCCESS)
4357 {
4358 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4359 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4360 return rcStrict;
4361 }
4362 }
4363
4364 /*
4365 * From this point on, we're technically in the new task. We will defer exceptions
4366 * until the completion of the task switch but before executing any instructions in the new task.
4367 */
4368 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4369 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4370 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4371 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4372 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4373 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4374 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4375
4376 /* Set the busy bit in TR. */
4377 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4378 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4379 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4380 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4381 {
4382 uNewEflags |= X86_EFL_NT;
4383 }
4384
4385 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4386 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4387 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4388
4389 pVCpu->cpum.GstCtx.eip = uNewEip;
4390 pVCpu->cpum.GstCtx.eax = uNewEax;
4391 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4392 pVCpu->cpum.GstCtx.edx = uNewEdx;
4393 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4394 pVCpu->cpum.GstCtx.esp = uNewEsp;
4395 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4396 pVCpu->cpum.GstCtx.esi = uNewEsi;
4397 pVCpu->cpum.GstCtx.edi = uNewEdi;
4398
4399 uNewEflags &= X86_EFL_LIVE_MASK;
4400 uNewEflags |= X86_EFL_RA1_MASK;
4401 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4402
4403 /*
4404 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4405 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4406 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4407 */
4408 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4409 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4410
4411 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4412 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4413
4414 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4415 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4416
4417 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4418 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4419
4420 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4421 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4422
4423 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4424 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4425 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4426
4427 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4428 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4429 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4430 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4431
4432 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4433 {
4434 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4435 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4436 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4437 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4438 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4439 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4440 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4441 }
4442
4443 /*
4444 * Switch CR3 for the new task.
4445 */
4446 if ( fIsNewTSS386
4447 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4448 {
4449 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4450 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4451 AssertRCSuccessReturn(rc, rc);
4452
4453 /* Inform PGM. */
4454 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4455 AssertRCReturn(rc, rc);
4456 /* ignore informational status codes */
4457
4458 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4459 }
4460
4461 /*
4462 * Switch LDTR for the new task.
4463 */
4464 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4465 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4466 else
4467 {
4468 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4469
4470 IEMSELDESC DescNewLdt;
4471 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4472 if (rcStrict != VINF_SUCCESS)
4473 {
4474 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4475 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4476 return rcStrict;
4477 }
4478 if ( !DescNewLdt.Legacy.Gen.u1Present
4479 || DescNewLdt.Legacy.Gen.u1DescType
4480 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4481 {
4482 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4483 uNewLdt, DescNewLdt.Legacy.u));
4484 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4485 }
4486
4487 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4488 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4489 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4490 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4491 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4492 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4493 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4494 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4495 }
4496
4497 IEMSELDESC DescSS;
4498 if (IEM_IS_V86_MODE(pVCpu))
4499 {
4500 pVCpu->iem.s.uCpl = 3;
4501 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4502 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4503 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4504 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4505 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4506 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4507
4508 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4509 DescSS.Legacy.u = 0;
4510 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4511 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4512 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4513 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4514 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4515 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4516 DescSS.Legacy.Gen.u2Dpl = 3;
4517 }
4518 else
4519 {
4520 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4521
4522 /*
4523 * Load the stack segment for the new task.
4524 */
4525 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4526 {
4527 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4528 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4529 }
4530
4531 /* Fetch the descriptor. */
4532 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4533 if (rcStrict != VINF_SUCCESS)
4534 {
4535 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4536 VBOXSTRICTRC_VAL(rcStrict)));
4537 return rcStrict;
4538 }
4539
4540 /* SS must be a data segment and writable. */
4541 if ( !DescSS.Legacy.Gen.u1DescType
4542 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4543 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4544 {
4545 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4546 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4547 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4548 }
4549
4550 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4551 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4552 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4553 {
4554 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4555 uNewCpl));
4556 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4557 }
4558
4559 /* Is it there? */
4560 if (!DescSS.Legacy.Gen.u1Present)
4561 {
4562 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4563 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4564 }
4565
4566 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4567 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4568
4569 /* Set the accessed bit before committing the result into SS. */
4570 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4571 {
4572 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4573 if (rcStrict != VINF_SUCCESS)
4574 return rcStrict;
4575 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4576 }
4577
4578 /* Commit SS. */
4579 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4580 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4581 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4582 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4583 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4584 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4585 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4586
4587 /* CPL has changed, update IEM before loading rest of segments. */
4588 pVCpu->iem.s.uCpl = uNewCpl;
4589
4590 /*
4591 * Load the data segments for the new task.
4592 */
4593 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4594 if (rcStrict != VINF_SUCCESS)
4595 return rcStrict;
4596 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4597 if (rcStrict != VINF_SUCCESS)
4598 return rcStrict;
4599 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4600 if (rcStrict != VINF_SUCCESS)
4601 return rcStrict;
4602 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4603 if (rcStrict != VINF_SUCCESS)
4604 return rcStrict;
4605
4606 /*
4607 * Load the code segment for the new task.
4608 */
4609 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4610 {
4611 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4612 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4613 }
4614
4615 /* Fetch the descriptor. */
4616 IEMSELDESC DescCS;
4617 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4618 if (rcStrict != VINF_SUCCESS)
4619 {
4620 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4621 return rcStrict;
4622 }
4623
4624 /* CS must be a code segment. */
4625 if ( !DescCS.Legacy.Gen.u1DescType
4626 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4627 {
4628 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4629 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4630 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4631 }
4632
4633 /* For conforming CS, DPL must be less than or equal to the RPL. */
4634 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4635 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4636 {
4637            Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4638 DescCS.Legacy.Gen.u2Dpl));
4639 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4640 }
4641
4642 /* For non-conforming CS, DPL must match RPL. */
4643 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4644 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4645 {
4646            Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4647 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4648 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4649 }
4650
4651 /* Is it there? */
4652 if (!DescCS.Legacy.Gen.u1Present)
4653 {
4654 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4655 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4656 }
4657
4658 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4659 u64Base = X86DESC_BASE(&DescCS.Legacy);
4660
4661 /* Set the accessed bit before committing the result into CS. */
4662 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4663 {
4664 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4665 if (rcStrict != VINF_SUCCESS)
4666 return rcStrict;
4667 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4668 }
4669
4670 /* Commit CS. */
4671 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4672 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4673 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4674 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4675 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4676 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4677 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4678 }
4679
4680 /** @todo Debug trap. */
4681 if (fIsNewTSS386 && fNewDebugTrap)
4682 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4683
4684 /*
4685 * Construct the error code masks based on what caused this task switch.
4686 * See Intel Instruction reference for INT.
4687 */
4688 uint16_t uExt;
4689 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4690 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4691 {
4692 uExt = 1;
4693 }
4694 else
4695 uExt = 0;
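    /* Note: uExt is merged in as the EXT bit (bit 0) of the error codes raised
       below, flagging faults that occur while delivering an external event. */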
4696
4697 /*
4698 * Push any error code on to the new stack.
4699 */
4700 if (fFlags & IEM_XCPT_FLAGS_ERR)
4701 {
4702 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4703 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4704 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4705
4706 /* Check that there is sufficient space on the stack. */
4707 /** @todo Factor out segment limit checking for normal/expand down segments
4708 * into a separate function. */
4709 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4710 {
4711 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4712 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4713 {
4714 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4715 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4716 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4717 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4718 }
4719 }
4720 else
4721 {
4722 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4723 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4724 {
4725 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4726 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4727 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4728 }
4729 }
4730
4731
4732 if (fIsNewTSS386)
4733 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4734 else
4735 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4736 if (rcStrict != VINF_SUCCESS)
4737 {
4738 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4739 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4740 return rcStrict;
4741 }
4742 }
4743
4744 /* Check the new EIP against the new CS limit. */
4745 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4746 {
4747        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4748 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4749 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4750 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4751 }
4752
4753 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4754 pVCpu->cpum.GstCtx.ss.Sel));
4755 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4756}
4757
4758
4759/**
4760 * Implements exceptions and interrupts for protected mode.
4761 *
4762 * @returns VBox strict status code.
4763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4764 * @param cbInstr The number of bytes to offset rIP by in the return
4765 * address.
4766 * @param u8Vector The interrupt / exception vector number.
4767 * @param fFlags The flags.
4768 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4769 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4770 */
4771IEM_STATIC VBOXSTRICTRC
4772iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4773 uint8_t cbInstr,
4774 uint8_t u8Vector,
4775 uint32_t fFlags,
4776 uint16_t uErr,
4777 uint64_t uCr2)
4778{
4779 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4780
4781 /*
4782 * Read the IDT entry.
4783 */
4784 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4785 {
4786 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4787 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4788 }
4789 X86DESC Idte;
4790 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4791 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4792 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4793 {
4794 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4795 return rcStrict;
4796 }
4797 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4798 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4799 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4800
4801 /*
4802 * Check the descriptor type, DPL and such.
4803 * ASSUMES this is done in the same order as described for call-gate calls.
4804 */
4805 if (Idte.Gate.u1DescType)
4806 {
4807 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4808 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4809 }
4810 bool fTaskGate = false;
4811 uint8_t f32BitGate = true;
4812 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4813 switch (Idte.Gate.u4Type)
4814 {
4815 case X86_SEL_TYPE_SYS_UNDEFINED:
4816 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4817 case X86_SEL_TYPE_SYS_LDT:
4818 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4819 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4820 case X86_SEL_TYPE_SYS_UNDEFINED2:
4821 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4822 case X86_SEL_TYPE_SYS_UNDEFINED3:
4823 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4824 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4825 case X86_SEL_TYPE_SYS_UNDEFINED4:
4826 {
4827 /** @todo check what actually happens when the type is wrong...
4828 * esp. call gates. */
4829 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4830 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4831 }
4832
4833 case X86_SEL_TYPE_SYS_286_INT_GATE:
4834 f32BitGate = false;
4835 RT_FALL_THRU();
4836 case X86_SEL_TYPE_SYS_386_INT_GATE:
4837 fEflToClear |= X86_EFL_IF;
4838 break;
4839
4840 case X86_SEL_TYPE_SYS_TASK_GATE:
4841 fTaskGate = true;
4842#ifndef IEM_IMPLEMENTS_TASKSWITCH
4843 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4844#endif
4845 break;
4846
4847 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4848            f32BitGate = false;
                RT_FALL_THRU();
4849 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4850 break;
4851
4852 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4853 }
4854
4855 /* Check DPL against CPL if applicable. */
4856 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4857 {
4858 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4859 {
4860 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4861 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4862 }
4863 }
4864
4865 /* Is it there? */
4866 if (!Idte.Gate.u1Present)
4867 {
4868 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4869 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4870 }
4871
4872 /* Is it a task-gate? */
4873 if (fTaskGate)
4874 {
4875 /*
4876 * Construct the error code masks based on what caused this task switch.
4877 * See Intel Instruction reference for INT.
4878 */
4879 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4880 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4881 RTSEL SelTSS = Idte.Gate.u16Sel;
4882
4883 /*
4884 * Fetch the TSS descriptor in the GDT.
4885 */
4886 IEMSELDESC DescTSS;
4887 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4888 if (rcStrict != VINF_SUCCESS)
4889 {
4890 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4891 VBOXSTRICTRC_VAL(rcStrict)));
4892 return rcStrict;
4893 }
4894
4895 /* The TSS descriptor must be a system segment and be available (not busy). */
4896 if ( DescTSS.Legacy.Gen.u1DescType
4897 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4898 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4899 {
4900 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4901 u8Vector, SelTSS, DescTSS.Legacy.au64));
4902 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4903 }
4904
4905 /* The TSS must be present. */
4906 if (!DescTSS.Legacy.Gen.u1Present)
4907 {
4908 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4909 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4910 }
4911
4912 /* Do the actual task switch. */
4913 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4914 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4915 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4916 }
4917
4918 /* A null CS is bad. */
4919 RTSEL NewCS = Idte.Gate.u16Sel;
4920 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4921 {
4922 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4923 return iemRaiseGeneralProtectionFault0(pVCpu);
4924 }
4925
4926 /* Fetch the descriptor for the new CS. */
4927 IEMSELDESC DescCS;
4928 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4929 if (rcStrict != VINF_SUCCESS)
4930 {
4931 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4932 return rcStrict;
4933 }
4934
4935 /* Must be a code segment. */
4936 if (!DescCS.Legacy.Gen.u1DescType)
4937 {
4938 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4939 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4940 }
4941 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4942 {
4943 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4944 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4945 }
4946
4947 /* Don't allow lowering the privilege level. */
4948 /** @todo Does the lowering of privileges apply to software interrupts
4949 * only? This has bearings on the more-privileged or
4950 * same-privilege stack behavior further down. A testcase would
4951 * be nice. */
4952 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4953 {
4954 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4955 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4956 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4957 }
4958
4959 /* Make sure the selector is present. */
4960 if (!DescCS.Legacy.Gen.u1Present)
4961 {
4962 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4963 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4964 }
4965
4966 /* Check the new EIP against the new CS limit. */
4967 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4968 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4969 ? Idte.Gate.u16OffsetLow
4970 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4971 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4972 if (uNewEip > cbLimitCS)
4973 {
4974 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4975 u8Vector, uNewEip, cbLimitCS, NewCS));
4976 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4977 }
4978 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4979
4980 /* Calc the flag image to push. */
4981 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4982 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4983 fEfl &= ~X86_EFL_RF;
4984 else
4985 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4986
4987 /* From V8086 mode only go to CPL 0. */
4988 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4989 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4990 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4991 {
4992 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4993 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4994 }
4995
4996 /*
4997 * If the privilege level changes, we need to get a new stack from the TSS.
4998 * This in turns means validating the new SS and ESP...
4999 */
5000 if (uNewCpl != pVCpu->iem.s.uCpl)
5001 {
5002 RTSEL NewSS;
5003 uint32_t uNewEsp;
5004 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5005 if (rcStrict != VINF_SUCCESS)
5006 return rcStrict;
5007
5008 IEMSELDESC DescSS;
5009 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5010 if (rcStrict != VINF_SUCCESS)
5011 return rcStrict;
5012 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5013 if (!DescSS.Legacy.Gen.u1DefBig)
5014 {
5015 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5016 uNewEsp = (uint16_t)uNewEsp;
5017 }
5018
5019 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5020
5021 /* Check that there is sufficient space for the stack frame. */
5022 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5023 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5024 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5025 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
5026
5027 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5028 {
5029 if ( uNewEsp - 1 > cbLimitSS
5030 || uNewEsp < cbStackFrame)
5031 {
5032 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5033 u8Vector, NewSS, uNewEsp, cbStackFrame));
5034 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5035 }
5036 }
5037 else
5038 {
5039 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5040 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5041 {
5042 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5043 u8Vector, NewSS, uNewEsp, cbStackFrame));
5044 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5045 }
5046 }
5047
5048 /*
5049 * Start making changes.
5050 */
5051
5052 /* Set the new CPL so that stack accesses use it. */
5053 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5054 pVCpu->iem.s.uCpl = uNewCpl;
5055
5056 /* Create the stack frame. */
5057 RTPTRUNION uStackFrame;
5058 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5059 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5060 if (rcStrict != VINF_SUCCESS)
5061 return rcStrict;
5062 void * const pvStackFrame = uStackFrame.pv;
5063 if (f32BitGate)
5064 {
5065 if (fFlags & IEM_XCPT_FLAGS_ERR)
5066 *uStackFrame.pu32++ = uErr;
5067 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5068 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5069 uStackFrame.pu32[2] = fEfl;
5070 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5071 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5072 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5073 if (fEfl & X86_EFL_VM)
5074 {
5075 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5076 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5077 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5078 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5079 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5080 }
5081 }
5082 else
5083 {
5084 if (fFlags & IEM_XCPT_FLAGS_ERR)
5085 *uStackFrame.pu16++ = uErr;
5086 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5087 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5088 uStackFrame.pu16[2] = fEfl;
5089 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5090 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5091 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5092 if (fEfl & X86_EFL_VM)
5093 {
5094 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5095 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5096 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5097 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5098 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5099 }
5100 }
5101 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5102 if (rcStrict != VINF_SUCCESS)
5103 return rcStrict;
5104
5105 /* Mark the selectors 'accessed' (hope this is the correct time). */
5106        /** @todo testcase: exactly _when_ are the accessed bits set - before or
5107 * after pushing the stack frame? (Write protect the gdt + stack to
5108 * find out.) */
5109 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5110 {
5111 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5112 if (rcStrict != VINF_SUCCESS)
5113 return rcStrict;
5114 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5115 }
5116
5117 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5118 {
5119 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5120 if (rcStrict != VINF_SUCCESS)
5121 return rcStrict;
5122 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5123 }
5124
5125 /*
5126         * Start committing the register changes (joins with the DPL=CPL branch).
5127 */
5128 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5129 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5130 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5131 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5132 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5133 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5134 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5135 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5136 * SP is loaded).
5137 * Need to check the other combinations too:
5138 * - 16-bit TSS, 32-bit handler
5139 * - 32-bit TSS, 16-bit handler */
5140 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5141 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5142 else
5143 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5144
5145 if (fEfl & X86_EFL_VM)
5146 {
5147 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5148 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5149 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5150 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5151 }
5152 }
5153 /*
5154 * Same privilege, no stack change and smaller stack frame.
5155 */
5156 else
5157 {
5158 uint64_t uNewRsp;
5159 RTPTRUNION uStackFrame;
5160 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5161 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5162 if (rcStrict != VINF_SUCCESS)
5163 return rcStrict;
5164 void * const pvStackFrame = uStackFrame.pv;
5165
5166 if (f32BitGate)
5167 {
5168 if (fFlags & IEM_XCPT_FLAGS_ERR)
5169 *uStackFrame.pu32++ = uErr;
5170 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5171 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5172 uStackFrame.pu32[2] = fEfl;
5173 }
5174 else
5175 {
5176 if (fFlags & IEM_XCPT_FLAGS_ERR)
5177 *uStackFrame.pu16++ = uErr;
5178 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5179 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5180 uStackFrame.pu16[2] = fEfl;
5181 }
5182 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5183 if (rcStrict != VINF_SUCCESS)
5184 return rcStrict;
5185
5186 /* Mark the CS selector as 'accessed'. */
5187 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5188 {
5189 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5190 if (rcStrict != VINF_SUCCESS)
5191 return rcStrict;
5192 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5193 }
5194
5195 /*
5196 * Start committing the register changes (joins with the other branch).
5197 */
5198 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5199 }
5200
5201 /* ... register committing continues. */
5202 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5203 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5204 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5205 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5206 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5207 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5208
5209 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5210 fEfl &= ~fEflToClear;
5211 IEMMISC_SET_EFL(pVCpu, fEfl);
5212
5213 if (fFlags & IEM_XCPT_FLAGS_CR2)
5214 pVCpu->cpum.GstCtx.cr2 = uCr2;
5215
5216 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5217 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5218
5219 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5220}
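
/*
 * For reference, a summary of the stack frames built by the function above
 * (lowest address first, i.e. pop order): [error code, if any], EIP, CS,
 * EFLAGS, and on a privilege change additionally the old ESP and SS, plus
 * ES/DS/FS/GS when interrupting V8086 code.  16-bit gates use the same
 * layout with 16-bit slots.
 */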
5221
5222
5223/**
5224 * Implements exceptions and interrupts for long mode.
5225 *
5226 * @returns VBox strict status code.
5227 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5228 * @param cbInstr The number of bytes to offset rIP by in the return
5229 * address.
5230 * @param u8Vector The interrupt / exception vector number.
5231 * @param fFlags The flags.
5232 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5233 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5234 */
5235IEM_STATIC VBOXSTRICTRC
5236iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5237 uint8_t cbInstr,
5238 uint8_t u8Vector,
5239 uint32_t fFlags,
5240 uint16_t uErr,
5241 uint64_t uCr2)
5242{
5243 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5244
5245 /*
5246 * Read the IDT entry.
5247 */
5248 uint16_t offIdt = (uint16_t)u8Vector << 4;
5249 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5250 {
5251 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5252 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5253 }
5254 X86DESC64 Idte;
5255 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5256 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5257 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5258 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5259 {
5260 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5261 return rcStrict;
5262 }
5263 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5264 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5265 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5266
5267 /*
5268 * Check the descriptor type, DPL and such.
5269 * ASSUMES this is done in the same order as described for call-gate calls.
5270 */
5271 if (Idte.Gate.u1DescType)
5272 {
5273 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5274 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5275 }
5276 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5277 switch (Idte.Gate.u4Type)
5278 {
5279 case AMD64_SEL_TYPE_SYS_INT_GATE:
5280 fEflToClear |= X86_EFL_IF;
5281 break;
5282 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5283 break;
5284
5285 default:
5286 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5287 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5288 }
5289
5290 /* Check DPL against CPL if applicable. */
5291 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5292 {
5293 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5294 {
5295 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5296 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5297 }
5298 }
5299
5300 /* Is it there? */
5301 if (!Idte.Gate.u1Present)
5302 {
5303 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5304 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5305 }
5306
5307 /* A null CS is bad. */
5308 RTSEL NewCS = Idte.Gate.u16Sel;
5309 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5310 {
5311 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5312 return iemRaiseGeneralProtectionFault0(pVCpu);
5313 }
5314
5315 /* Fetch the descriptor for the new CS. */
5316 IEMSELDESC DescCS;
5317 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5318 if (rcStrict != VINF_SUCCESS)
5319 {
5320 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5321 return rcStrict;
5322 }
5323
5324 /* Must be a 64-bit code segment. */
5325 if (!DescCS.Long.Gen.u1DescType)
5326 {
5327 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5328 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5329 }
5330 if ( !DescCS.Long.Gen.u1Long
5331 || DescCS.Long.Gen.u1DefBig
5332 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5333 {
5334 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5335 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5336 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5337 }
5338
5339 /* Don't allow lowering the privilege level. For non-conforming CS
5340 selectors, the CS.DPL sets the privilege level the trap/interrupt
5341 handler runs at. For conforming CS selectors, the CPL remains
5342 unchanged, but the CS.DPL must be <= CPL. */
5343 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5344 * when CPU in Ring-0. Result \#GP? */
5345 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5346 {
5347 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5348 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5349 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5350 }
5351
5352
5353 /* Make sure the selector is present. */
5354 if (!DescCS.Legacy.Gen.u1Present)
5355 {
5356 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5357 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5358 }
5359
5360 /* Check that the new RIP is canonical. */
5361 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5362 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5363 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5364 if (!IEM_IS_CANONICAL(uNewRip))
5365 {
5366 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5367 return iemRaiseGeneralProtectionFault0(pVCpu);
5368 }
5369
5370 /*
5371 * If the privilege level changes or if the IST isn't zero, we need to get
5372 * a new stack from the TSS.
5373 */
5374 uint64_t uNewRsp;
5375 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5376 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5377 if ( uNewCpl != pVCpu->iem.s.uCpl
5378 || Idte.Gate.u3IST != 0)
5379 {
5380 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5381 if (rcStrict != VINF_SUCCESS)
5382 return rcStrict;
5383 }
5384 else
5385 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5386 uNewRsp &= ~(uint64_t)0xf;
5387
5388 /*
5389 * Calc the flag image to push.
5390 */
5391 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5392 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5393 fEfl &= ~X86_EFL_RF;
5394 else
5395 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5396
5397 /*
5398 * Start making changes.
5399 */
5400 /* Set the new CPL so that stack accesses use it. */
5401 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5402 pVCpu->iem.s.uCpl = uNewCpl;
5403
5404 /* Create the stack frame. */
5405 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5406 RTPTRUNION uStackFrame;
5407 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5408 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5409 if (rcStrict != VINF_SUCCESS)
5410 return rcStrict;
5411 void * const pvStackFrame = uStackFrame.pv;
5412
5413 if (fFlags & IEM_XCPT_FLAGS_ERR)
5414 *uStackFrame.pu64++ = uErr;
5415 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5416 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5417 uStackFrame.pu64[2] = fEfl;
5418 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5419 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5420 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5421 if (rcStrict != VINF_SUCCESS)
5422 return rcStrict;
5423
5424    /* Mark the CS selector 'accessed' (hope this is the correct time). */
5425    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5426 * after pushing the stack frame? (Write protect the gdt + stack to
5427 * find out.) */
5428 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5429 {
5430 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5431 if (rcStrict != VINF_SUCCESS)
5432 return rcStrict;
5433 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5434 }
5435
5436 /*
5437     * Start committing the register changes.
5438 */
5439 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5440 * hidden registers when interrupting 32-bit or 16-bit code! */
5441 if (uNewCpl != uOldCpl)
5442 {
5443 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5444 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5445 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5446 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5447 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5448 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5449 }
5450 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5451 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5452 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5453 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5454 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5455 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5456 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5457 pVCpu->cpum.GstCtx.rip = uNewRip;
5458
5459 fEfl &= ~fEflToClear;
5460 IEMMISC_SET_EFL(pVCpu, fEfl);
5461
5462 if (fFlags & IEM_XCPT_FLAGS_CR2)
5463 pVCpu->cpum.GstCtx.cr2 = uCr2;
5464
5465 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5466 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5467
5468 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5469}
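
/*
 * For reference, the long mode handler above pushes the return RIP, CS, RFLAGS,
 * old RSP and old SS as 64-bit slots onto an RSP first aligned down to 16 bytes,
 * preceded at the lowest address by the error code when one is supplied.
 */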
5470
5471
5472/**
5473 * Implements exceptions and interrupts.
5474 *
5475 * All exceptions and interrupts go through this function!
5476 *
5477 * @returns VBox strict status code.
5478 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5479 * @param cbInstr The number of bytes to offset rIP by in the return
5480 * address.
5481 * @param u8Vector The interrupt / exception vector number.
5482 * @param fFlags The flags.
5483 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5484 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5485 */
5486DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5487iemRaiseXcptOrInt(PVMCPU pVCpu,
5488 uint8_t cbInstr,
5489 uint8_t u8Vector,
5490 uint32_t fFlags,
5491 uint16_t uErr,
5492 uint64_t uCr2)
5493{
5494 /*
5495 * Get all the state that we might need here.
5496 */
5497 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5498 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5499
5500#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5501 /*
5502 * Flush prefetch buffer
5503 */
5504 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5505#endif
5506
5507 /*
5508 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5509 */
5510 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5511 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5512 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5513 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5514 {
5515 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5516 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5517 u8Vector = X86_XCPT_GP;
5518 uErr = 0;
5519 }
5520#ifdef DBGFTRACE_ENABLED
5521 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5522 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5523 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5524#endif
5525
5526#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5527 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5528 {
5529 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5530 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5531 return rcStrict0;
5532 }
5533#endif
5534
5535#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5536 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5537 {
5538 /*
5539 * If the event is being injected as part of VMRUN, it isn't subject to event
5540 * intercepts in the nested-guest. However, secondary exceptions that occur
5541 * during injection of any event -are- subject to exception intercepts.
5542 *
5543 * See AMD spec. 15.20 "Event Injection".
5544 */
5545 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5546 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5547 else
5548 {
5549 /*
5550 * Check and handle if the event being raised is intercepted.
5551 */
5552 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5553 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5554 return rcStrict0;
5555 }
5556 }
5557#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5558
5559 /*
5560 * Do recursion accounting.
5561 */
5562 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5563 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5564 if (pVCpu->iem.s.cXcptRecursions == 0)
5565 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5566 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5567 else
5568 {
5569 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5570 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5571 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5572
5573 if (pVCpu->iem.s.cXcptRecursions >= 4)
5574 {
5575#ifdef DEBUG_bird
5576 AssertFailed();
5577#endif
5578 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5579 }
5580
5581 /*
5582 * Evaluate the sequence of recurring events.
5583 */
5584 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5585 NULL /* pXcptRaiseInfo */);
5586 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5587 { /* likely */ }
5588 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5589 {
5590 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5591 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5592 u8Vector = X86_XCPT_DF;
5593 uErr = 0;
5594 /** @todo NSTVMX: Do we need to do something here for VMX? */
5595 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5596 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5597 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5598 }
5599 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5600 {
5601 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5602 return iemInitiateCpuShutdown(pVCpu);
5603 }
5604 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5605 {
5606 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5607 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5608 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5609 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5610 return VERR_EM_GUEST_CPU_HANG;
5611 }
5612 else
5613 {
5614 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5615 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5616 return VERR_IEM_IPE_9;
5617 }
5618
5619 /*
5620     * The 'EXT' bit is set when an exception occurs during delivery of an external
5621     * event (such as an interrupt or an earlier exception)[1]. The privileged software
5622     * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5623     * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
5624 *
5625 * [1] - Intel spec. 6.13 "Error Code"
5626 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5627 * [3] - Intel Instruction reference for INT n.
5628 */
5629 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5630 && (fFlags & IEM_XCPT_FLAGS_ERR)
5631 && u8Vector != X86_XCPT_PF
5632 && u8Vector != X86_XCPT_DF)
5633 {
5634 uErr |= X86_TRAP_ERR_EXTERNAL;
5635 }
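        /* For illustration: a #GP with error code 0x10 raised while delivering an
           external interrupt is thus pushed as 0x11 - same selector index, EXT set. */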
5636 }
5637
5638 pVCpu->iem.s.cXcptRecursions++;
5639 pVCpu->iem.s.uCurXcpt = u8Vector;
5640 pVCpu->iem.s.fCurXcpt = fFlags;
5641 pVCpu->iem.s.uCurXcptErr = uErr;
5642 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5643
5644 /*
5645 * Extensive logging.
5646 */
5647#if defined(LOG_ENABLED) && defined(IN_RING3)
5648 if (LogIs3Enabled())
5649 {
5650 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5651 PVM pVM = pVCpu->CTX_SUFF(pVM);
5652 char szRegs[4096];
5653 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5654 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5655 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5656 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5657 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5658 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5659 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5660 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5661 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5662 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5663 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5664 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5665 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5666 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5667 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5668 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5669 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5670 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5671 " efer=%016VR{efer}\n"
5672 " pat=%016VR{pat}\n"
5673 " sf_mask=%016VR{sf_mask}\n"
5674 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5675 " lstar=%016VR{lstar}\n"
5676 " star=%016VR{star} cstar=%016VR{cstar}\n"
5677 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5678 );
5679
5680 char szInstr[256];
5681 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5682 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5683 szInstr, sizeof(szInstr), NULL);
5684 Log3(("%s%s\n", szRegs, szInstr));
5685 }
5686#endif /* LOG_ENABLED */
5687
5688 /*
5689 * Call the mode specific worker function.
5690 */
5691 VBOXSTRICTRC rcStrict;
5692 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5693 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5694 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5695 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5696 else
5697 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5698
5699 /* Flush the prefetch buffer. */
5700#ifdef IEM_WITH_CODE_TLB
5701 pVCpu->iem.s.pbInstrBuf = NULL;
5702#else
5703 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5704#endif
5705
5706 /*
5707 * Unwind.
5708 */
5709 pVCpu->iem.s.cXcptRecursions--;
5710 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5711 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5712 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5713 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5714 pVCpu->iem.s.cXcptRecursions + 1));
5715 return rcStrict;
5716}
5717
5718#ifdef IEM_WITH_SETJMP
5719/**
5720 * See iemRaiseXcptOrInt. Will not return.
5721 */
5722IEM_STATIC DECL_NO_RETURN(void)
5723iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5724 uint8_t cbInstr,
5725 uint8_t u8Vector,
5726 uint32_t fFlags,
5727 uint16_t uErr,
5728 uint64_t uCr2)
5729{
5730 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5731 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5732}
5733#endif
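
/*
 * The Jmp variant above exists for the IEM_WITH_SETJMP code paths (such as the
 * opcode and data fetchers) that cannot conveniently return a strict status
 * code; it converts the status into a longjmp back to the frame stored in
 * pVCpu->iem.s.pJmpBuf.
 */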
5734
5735
5736/** \#DE - 00. */
5737DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5738{
5739 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5740}
5741
5742
5743/** \#DB - 01.
5744 * @note This automatically clears DR7.GD. */
5745DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5746{
5747 /** @todo set/clear RF. */
5748 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5749 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5750}
5751
5752
5753/** \#BR - 05. */
5754DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5755{
5756 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5757}
5758
5759
5760/** \#UD - 06. */
5761DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5762{
5763 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5764}
5765
5766
5767/** \#NM - 07. */
5768DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5769{
5770 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5771}
5772
5773
5774/** \#TS(err) - 0a. */
5775DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5776{
5777 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5778}
5779
5780
5781/** \#TS(tr) - 0a. */
5782DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5783{
5784 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5785 pVCpu->cpum.GstCtx.tr.Sel, 0);
5786}
5787
5788
5789/** \#TS(0) - 0a. */
5790DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5791{
5792 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5793 0, 0);
5794}
5795
5796
5797/** \#TS(err) - 0a. */
5798DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5799{
5800 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5801 uSel & X86_SEL_MASK_OFF_RPL, 0);
5802}
5803
5804
5805/** \#NP(err) - 0b. */
5806DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5807{
5808 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5809}
5810
5811
5812/** \#NP(sel) - 0b. */
5813DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5814{
5815 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5816 uSel & ~X86_SEL_RPL, 0);
5817}
5818
5819
5820/** \#SS(seg) - 0c. */
5821DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5822{
5823 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5824 uSel & ~X86_SEL_RPL, 0);
5825}
5826
5827
5828/** \#SS(err) - 0c. */
5829DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5830{
5831 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5832}
5833
5834
5835/** \#GP(n) - 0d. */
5836DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5837{
5838 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5839}
5840
5841
5842/** \#GP(0) - 0d. */
5843DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5844{
5845 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5846}
5847
5848#ifdef IEM_WITH_SETJMP
5849/** \#GP(0) - 0d. */
5850DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5851{
5852 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5853}
5854#endif
5855
5856
5857/** \#GP(sel) - 0d. */
5858DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5859{
5860 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5861 Sel & ~X86_SEL_RPL, 0);
5862}
5863
5864
5865/** \#GP(0) - 0d. */
5866DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5867{
5868 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5869}
5870
5871
5872/** \#GP(sel) - 0d. */
5873DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5874{
5875 NOREF(iSegReg); NOREF(fAccess);
5876 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5877 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5878}
5879
5880#ifdef IEM_WITH_SETJMP
5881/** \#GP(sel) - 0d, longjmp. */
5882DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5883{
5884 NOREF(iSegReg); NOREF(fAccess);
5885 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5886 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5887}
5888#endif
5889
5890/** \#GP(sel) - 0d. */
5891DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5892{
5893 NOREF(Sel);
5894 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5895}
5896
5897#ifdef IEM_WITH_SETJMP
5898/** \#GP(sel) - 0d, longjmp. */
5899DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5900{
5901 NOREF(Sel);
5902 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5903}
5904#endif
5905
5906
5907/** \#GP(sel) - 0d. */
5908DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5909{
5910 NOREF(iSegReg); NOREF(fAccess);
5911 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5912}
5913
5914#ifdef IEM_WITH_SETJMP
5915/** \#GP(sel) - 0d, longjmp. */
5916DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5917 uint32_t fAccess)
5918{
5919 NOREF(iSegReg); NOREF(fAccess);
5920 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5921}
5922#endif
5923
5924
5925/** \#PF(n) - 0e. */
5926DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5927{
5928 uint16_t uErr;
5929 switch (rc)
5930 {
5931 case VERR_PAGE_NOT_PRESENT:
5932 case VERR_PAGE_TABLE_NOT_PRESENT:
5933 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5934 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5935 uErr = 0;
5936 break;
5937
5938 default:
5939 AssertMsgFailed(("%Rrc\n", rc));
5940 RT_FALL_THRU();
5941 case VERR_ACCESS_DENIED:
5942 uErr = X86_TRAP_PF_P;
5943 break;
5944
5945 /** @todo reserved */
5946 }
5947
5948 if (pVCpu->iem.s.uCpl == 3)
5949 uErr |= X86_TRAP_PF_US;
5950
5951 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5952 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5953 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5954 uErr |= X86_TRAP_PF_ID;
5955
5956#if 0 /* This is so much nonsense, really. Why was it done like that? */
5957    /* Note! RW access callers reporting a WRITE protection fault will clear
5958 the READ flag before calling. So, read-modify-write accesses (RW)
5959 can safely be reported as READ faults. */
5960 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5961 uErr |= X86_TRAP_PF_RW;
5962#else
5963 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5964 {
5965 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5966 uErr |= X86_TRAP_PF_RW;
5967 }
5968#endif
5969
5970 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5971 uErr, GCPtrWhere);
5972}
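
/*
 * For illustration only: a ring-3 write to a present but read-only page comes
 * out of the above as uErr = X86_TRAP_PF_P | X86_TRAP_PF_US | X86_TRAP_PF_RW,
 * while GCPtrWhere ends up in CR2 via the IEM_XCPT_FLAGS_CR2 flag.
 */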
5973
5974#ifdef IEM_WITH_SETJMP
5975/** \#PF(n) - 0e, longjmp. */
5976IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5977{
5978 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5979}
5980#endif
5981
5982
5983/** \#MF(0) - 10. */
5984DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5985{
5986 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5987}
5988
5989
5990/** \#AC(0) - 11. */
5991DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
5992{
5993 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5994}
5995
5996
5997/**
5998 * Macro for calling iemCImplRaiseDivideError().
5999 *
6000 * This enables us to add/remove arguments and force different levels of
6001 * inlining as we wish.
6002 *
6003 * @return Strict VBox status code.
6004 */
6005#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6006IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6007{
6008 NOREF(cbInstr);
6009 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6010}
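
/*
 * Hypothetical usage sketch from an opcode decoder, where the instruction is
 * deferred to the C implementation above (u8Imm is just an illustrative name):
 *
 *      if (!u8Imm)
 *          return IEMOP_RAISE_DIVIDE_ERROR();
 */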
6011
6012
6013/**
6014 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6015 *
6016 * This enables us to add/remove arguments and force different levels of
6017 * inlining as we wish.
6018 *
6019 * @return Strict VBox status code.
6020 */
6021#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6022IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6023{
6024 NOREF(cbInstr);
6025 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6026}
6027
6028
6029/**
6030 * Macro for calling iemCImplRaiseInvalidOpcode().
6031 *
6032 * This enables us to add/remove arguments and force different levels of
6033 * inlining as we wish.
6034 *
6035 * @return Strict VBox status code.
6036 */
6037#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6038IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6039{
6040 NOREF(cbInstr);
6041 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6042}
6043
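/*
 * Usage sketch for the IEMOP_RAISE_* wrappers above: an opcode decoder simply
 * does "return IEMOP_RAISE_INVALID_OPCODE();" (see the FNIEMOP_UD_STUB macros
 * further down), which defers to the C implementation through
 * IEM_MC_DEFER_TO_CIMPL_0 so the instruction length bookkeeping stays in one place.
 */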
6044
6045/** @} */
6046
6047
6048/*
6049 *
6050 * Helper routines.
6051 * Helper routines.
6052 * Helper routines.
6053 *
6054 */
6055
6056/**
6057 * Recalculates the effective operand size.
6058 *
6059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6060 */
6061IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6062{
6063 switch (pVCpu->iem.s.enmCpuMode)
6064 {
6065 case IEMMODE_16BIT:
6066 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6067 break;
6068 case IEMMODE_32BIT:
6069 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6070 break;
6071 case IEMMODE_64BIT:
6072 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6073 {
6074 case 0:
6075 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6076 break;
6077 case IEM_OP_PRF_SIZE_OP:
6078 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6079 break;
6080 case IEM_OP_PRF_SIZE_REX_W:
6081 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6082 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6083 break;
6084 }
6085 break;
6086 default:
6087 AssertFailed();
6088 }
6089}
6090
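/*
 * For reference, the effective operand size computed above works out to:
 *
 *      CPU mode    no prefix      0x66           REX.W    REX.W + 0x66
 *      16-bit      16-bit         32-bit         -        -
 *      32-bit      32-bit         16-bit         -        -
 *      64-bit      enmDefOpSize   16-bit         64-bit   64-bit
 *
 * i.e. in 64-bit mode REX.W takes precedence over the operand size prefix.
 */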
6091
6092/**
6093 * Sets the default operand size to 64-bit and recalculates the effective
6094 * operand size.
6095 *
6096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6097 */
6098IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6099{
6100 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6101 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6102 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6103 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6104 else
6105 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6106}
6107
6108
6109/*
6110 *
6111 * Common opcode decoders.
6112 * Common opcode decoders.
6113 * Common opcode decoders.
6114 *
6115 */
6116//#include <iprt/mem.h>
6117
6118/**
6119 * Used to add extra details about a stub case.
6120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6121 */
6122IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6123{
6124#if defined(LOG_ENABLED) && defined(IN_RING3)
6125 PVM pVM = pVCpu->CTX_SUFF(pVM);
6126 char szRegs[4096];
6127 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6128 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6129 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6130 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6131 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6132 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6133 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6134 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6135 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6136 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6137 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6138 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6139 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6140 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6141 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6142 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6143 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6144 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6145 " efer=%016VR{efer}\n"
6146 " pat=%016VR{pat}\n"
6147 " sf_mask=%016VR{sf_mask}\n"
6148 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6149 " lstar=%016VR{lstar}\n"
6150 " star=%016VR{star} cstar=%016VR{cstar}\n"
6151 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6152 );
6153
6154 char szInstr[256];
6155 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6156 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6157 szInstr, sizeof(szInstr), NULL);
6158
6159 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6160#else
6161 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6162#endif
6163}
6164
6165/**
6166 * Complains about a stub.
6167 *
6168 * Two versions of this macro are provided: one for daily use and one for use
6169 * when working on IEM.
6170 */
6171#if 0
6172# define IEMOP_BITCH_ABOUT_STUB() \
6173 do { \
6174 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6175 iemOpStubMsg2(pVCpu); \
6176 RTAssertPanic(); \
6177 } while (0)
6178#else
6179# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6180#endif
6181
6182/** Stubs an opcode. */
6183#define FNIEMOP_STUB(a_Name) \
6184 FNIEMOP_DEF(a_Name) \
6185 { \
6186 RT_NOREF_PV(pVCpu); \
6187 IEMOP_BITCH_ABOUT_STUB(); \
6188 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6189 } \
6190 typedef int ignore_semicolon
6191
6192/** Stubs an opcode. */
6193#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6194 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6195 { \
6196 RT_NOREF_PV(pVCpu); \
6197 RT_NOREF_PV(a_Name0); \
6198 IEMOP_BITCH_ABOUT_STUB(); \
6199 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6200 } \
6201 typedef int ignore_semicolon
6202
6203/** Stubs an opcode which currently should raise \#UD. */
6204#define FNIEMOP_UD_STUB(a_Name) \
6205 FNIEMOP_DEF(a_Name) \
6206 { \
6207 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6208 return IEMOP_RAISE_INVALID_OPCODE(); \
6209 } \
6210 typedef int ignore_semicolon
6211
6212/** Stubs an opcode which currently should raise \#UD. */
6213#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6214 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6215 { \
6216 RT_NOREF_PV(pVCpu); \
6217 RT_NOREF_PV(a_Name0); \
6218 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6219 return IEMOP_RAISE_INVALID_OPCODE(); \
6220 } \
6221 typedef int ignore_semicolon
6222
6223
6224
6225/** @name Register Access.
6226 * @{
6227 */
6228
6229/**
6230 * Gets a reference (pointer) to the specified hidden segment register.
6231 *
6232 * @returns Hidden register reference.
6233 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6234 * @param iSegReg The segment register.
6235 */
6236IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6237{
6238 Assert(iSegReg < X86_SREG_COUNT);
6239 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6240 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6241
6242#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6243 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6244 { /* likely */ }
6245 else
6246 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6247#else
6248 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6249#endif
6250 return pSReg;
6251}
6252
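/*
 * Typical use of iemSRegGetHid (sketch): callers grab the hidden parts for
 * limit and attribute checks, e.g.
 *
 *      PCPUMSELREG pSReg = iemSRegGetHid(pVCpu, X86_SREG_SS);
 *      if (pSReg->Attr.n.u1DefBig) ...
 *
 * In raw-mode contexts the hidden parts may be stale, which is why they are
 * lazily reloaded above.
 */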
6253
6254/**
6255 * Ensures that the given hidden segment register is up to date.
6256 *
6257 * @returns Hidden register reference.
6258 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6259 * @param pSReg The segment register.
6260 */
6261IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6262{
6263#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6264 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6265 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6266#else
6267 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6268 NOREF(pVCpu);
6269#endif
6270 return pSReg;
6271}
6272
6273
6274/**
6275 * Gets a reference (pointer) to the specified segment register (the selector
6276 * value).
6277 *
6278 * @returns Pointer to the selector variable.
6279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6280 * @param iSegReg The segment register.
6281 */
6282DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6283{
6284 Assert(iSegReg < X86_SREG_COUNT);
6285 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6286 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6287}
6288
6289
6290/**
6291 * Fetches the selector value of a segment register.
6292 *
6293 * @returns The selector value.
6294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6295 * @param iSegReg The segment register.
6296 */
6297DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6298{
6299 Assert(iSegReg < X86_SREG_COUNT);
6300 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6301 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6302}
6303
6304
6305/**
6306 * Fetches the base address value of a segment register.
6307 *
6308 * @returns The segment base address value.
6309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6310 * @param iSegReg The segment register.
6311 */
6312DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6313{
6314 Assert(iSegReg < X86_SREG_COUNT);
6315 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6316 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6317}
6318
6319
6320/**
6321 * Gets a reference (pointer) to the specified general purpose register.
6322 *
6323 * @returns Register reference.
6324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6325 * @param iReg The general purpose register.
6326 */
6327DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6328{
6329 Assert(iReg < 16);
6330 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6331}
6332
6333
6334/**
6335 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6336 *
6337 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6338 *
6339 * @returns Register reference.
6340 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6341 * @param iReg The register.
6342 */
6343DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6344{
6345 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6346 {
6347 Assert(iReg < 16);
6348 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6349 }
6350 /* high 8-bit register. */
6351 Assert(iReg < 8);
6352 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6353}
6354
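/*
 * Mapping illustration for iemGRegRefU8: without any REX prefix, encodings
 * 4..7 select the legacy high byte registers, e.g.
 *
 *      iemGRegRefU8(pVCpu, 4)  ->  &aGRegs[0].bHi    (AH)
 *      iemGRegRefU8(pVCpu, 7)  ->  &aGRegs[3].bHi    (BH)
 *
 * whereas with a REX prefix present the same encodings yield SPL..DIL
 * (aGRegs[4..7].u8).
 */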
6355
6356/**
6357 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6358 *
6359 * @returns Register reference.
6360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6361 * @param iReg The register.
6362 */
6363DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6364{
6365 Assert(iReg < 16);
6366 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6367}
6368
6369
6370/**
6371 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6372 *
6373 * @returns Register reference.
6374 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6375 * @param iReg The register.
6376 */
6377DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6378{
6379 Assert(iReg < 16);
6380 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6381}
6382
6383
6384/**
6385 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6386 *
6387 * @returns Register reference.
6388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6389 * @param iReg The register.
6390 */
6391DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6392{
6393 Assert(iReg < 16);
6394 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6395}
6396
6397
6398/**
6399 * Gets a reference (pointer) to the specified segment register's base address.
6400 *
6401 * @returns Segment register base address reference.
6402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6403 * @param iSegReg The segment selector.
6404 */
6405DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6406{
6407 Assert(iSegReg < X86_SREG_COUNT);
6408 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6409 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6410}
6411
6412
6413/**
6414 * Fetches the value of an 8-bit general purpose register.
6415 *
6416 * @returns The register value.
6417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6418 * @param iReg The register.
6419 */
6420DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6421{
6422 return *iemGRegRefU8(pVCpu, iReg);
6423}
6424
6425
6426/**
6427 * Fetches the value of a 16-bit general purpose register.
6428 *
6429 * @returns The register value.
6430 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6431 * @param iReg The register.
6432 */
6433DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6434{
6435 Assert(iReg < 16);
6436 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6437}
6438
6439
6440/**
6441 * Fetches the value of a 32-bit general purpose register.
6442 *
6443 * @returns The register value.
6444 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6445 * @param iReg The register.
6446 */
6447DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6448{
6449 Assert(iReg < 16);
6450 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6451}
6452
6453
6454/**
6455 * Fetches the value of a 64-bit general purpose register.
6456 *
6457 * @returns The register value.
6458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6459 * @param iReg The register.
6460 */
6461DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6462{
6463 Assert(iReg < 16);
6464 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6465}
6466
6467
6468/**
6469 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6470 *
6471 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6472 * segment limit.
6473 *
6474 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6475 * @param offNextInstr The offset of the next instruction.
6476 */
6477IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6478{
6479 switch (pVCpu->iem.s.enmEffOpSize)
6480 {
6481 case IEMMODE_16BIT:
6482 {
6483 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6484 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6485 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6486 return iemRaiseGeneralProtectionFault0(pVCpu);
6487 pVCpu->cpum.GstCtx.rip = uNewIp;
6488 break;
6489 }
6490
6491 case IEMMODE_32BIT:
6492 {
6493 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6494 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6495
6496 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6497 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6498 return iemRaiseGeneralProtectionFault0(pVCpu);
6499 pVCpu->cpum.GstCtx.rip = uNewEip;
6500 break;
6501 }
6502
6503 case IEMMODE_64BIT:
6504 {
6505 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6506
6507 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6508 if (!IEM_IS_CANONICAL(uNewRip))
6509 return iemRaiseGeneralProtectionFault0(pVCpu);
6510 pVCpu->cpum.GstCtx.rip = uNewRip;
6511 break;
6512 }
6513
6514 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6515 }
6516
6517 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6518
6519#ifndef IEM_WITH_CODE_TLB
6520 /* Flush the prefetch buffer. */
6521 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6522#endif
6523
6524 return VINF_SUCCESS;
6525}
6526
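/*
 * Worked example for the 16-bit case above: a two byte short jump (EB F0) at
 * IP=0x1234 gives uNewIp = 0x1234 + 2 + (int8_t)0xF0 = 0x1226, which is then
 * checked against CS.u32Limit before being committed to RIP.
 */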
6527
6528/**
6529 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6530 *
6531 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6532 * segment limit.
6533 *
6534 * @returns Strict VBox status code.
6535 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6536 * @param offNextInstr The offset of the next instruction.
6537 */
6538IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6539{
6540 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6541
6542 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6543 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6544 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6545 return iemRaiseGeneralProtectionFault0(pVCpu);
6546 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6547 pVCpu->cpum.GstCtx.rip = uNewIp;
6548 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6549
6550#ifndef IEM_WITH_CODE_TLB
6551 /* Flush the prefetch buffer. */
6552 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6553#endif
6554
6555 return VINF_SUCCESS;
6556}
6557
6558
6559/**
6560 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6561 *
6562 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6563 * segment limit.
6564 *
6565 * @returns Strict VBox status code.
6566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6567 * @param offNextInstr The offset of the next instruction.
6568 */
6569IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6570{
6571 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6572
6573 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6574 {
6575 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6576
6577 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6578 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6579 return iemRaiseGeneralProtectionFault0(pVCpu);
6580 pVCpu->cpum.GstCtx.rip = uNewEip;
6581 }
6582 else
6583 {
6584 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6585
6586 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6587 if (!IEM_IS_CANONICAL(uNewRip))
6588 return iemRaiseGeneralProtectionFault0(pVCpu);
6589 pVCpu->cpum.GstCtx.rip = uNewRip;
6590 }
6591 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6592
6593#ifndef IEM_WITH_CODE_TLB
6594 /* Flush the prefetch buffer. */
6595 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6596#endif
6597
6598 return VINF_SUCCESS;
6599}
6600
6601
6602/**
6603 * Performs a near jump to the specified address.
6604 *
6605 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6606 * segment limit.
6607 *
6608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6609 * @param uNewRip The new RIP value.
6610 */
6611IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6612{
6613 switch (pVCpu->iem.s.enmEffOpSize)
6614 {
6615 case IEMMODE_16BIT:
6616 {
6617 Assert(uNewRip <= UINT16_MAX);
6618 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6619 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6620 return iemRaiseGeneralProtectionFault0(pVCpu);
6621 /** @todo Test 16-bit jump in 64-bit mode. */
6622 pVCpu->cpum.GstCtx.rip = uNewRip;
6623 break;
6624 }
6625
6626 case IEMMODE_32BIT:
6627 {
6628 Assert(uNewRip <= UINT32_MAX);
6629 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6630 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6631
6632 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6633 return iemRaiseGeneralProtectionFault0(pVCpu);
6634 pVCpu->cpum.GstCtx.rip = uNewRip;
6635 break;
6636 }
6637
6638 case IEMMODE_64BIT:
6639 {
6640 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6641
6642 if (!IEM_IS_CANONICAL(uNewRip))
6643 return iemRaiseGeneralProtectionFault0(pVCpu);
6644 pVCpu->cpum.GstCtx.rip = uNewRip;
6645 break;
6646 }
6647
6648 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6649 }
6650
6651 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6652
6653#ifndef IEM_WITH_CODE_TLB
6654 /* Flush the prefetch buffer. */
6655 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6656#endif
6657
6658 return VINF_SUCCESS;
6659}
6660
6661
6662/**
6663 * Get the address of the top of the stack.
6664 *
6665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6666 */
6667DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6668{
6669 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6670 return pVCpu->cpum.GstCtx.rsp;
6671 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6672 return pVCpu->cpum.GstCtx.esp;
6673 return pVCpu->cpum.GstCtx.sp;
6674}
6675
6676
6677/**
6678 * Updates the RIP/EIP/IP to point to the next instruction.
6679 *
6680 * This function leaves the EFLAGS.RF flag alone.
6681 *
6682 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6683 * @param cbInstr The number of bytes to add.
6684 */
6685IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6686{
6687 switch (pVCpu->iem.s.enmCpuMode)
6688 {
6689 case IEMMODE_16BIT:
6690 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6691 pVCpu->cpum.GstCtx.eip += cbInstr;
6692 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6693 break;
6694
6695 case IEMMODE_32BIT:
6696 pVCpu->cpum.GstCtx.eip += cbInstr;
6697 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6698 break;
6699
6700 case IEMMODE_64BIT:
6701 pVCpu->cpum.GstCtx.rip += cbInstr;
6702 break;
6703 default: AssertFailed();
6704 }
6705}
6706
6707
6708#if 0
6709/**
6710 * Updates the RIP/EIP/IP to point to the next instruction.
6711 *
6712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6713 */
6714IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6715{
6716 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6717}
6718#endif
6719
6720
6721
6722/**
6723 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6724 *
6725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6726 * @param cbInstr The number of bytes to add.
6727 */
6728IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6729{
6730 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6731
6732 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6733#if ARCH_BITS >= 64
6734 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6735 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6736 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6737#else
6738 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6739 pVCpu->cpum.GstCtx.rip += cbInstr;
6740 else
6741 pVCpu->cpum.GstCtx.eip += cbInstr;
6742#endif
6743}
6744
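/*
 * Note on the mask table above: 16-bit and 32-bit modes mask the advanced RIP
 * down to 32 bits while 64-bit mode keeps the full value; unlike
 * iemRegAddToRipKeepRF there is no explicit 16-bit wrap-around here, the
 * preceding assertion guards against RIP growing out of range instead.
 */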
6745
6746/**
6747 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6748 *
6749 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6750 */
6751IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6752{
6753 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6754}
6755
6756
6757/**
6758 * Adds to the stack pointer.
6759 *
6760 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6761 * @param cbToAdd The number of bytes to add (8-bit!).
6762 */
6763DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6764{
6765 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6766 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6767 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6768 pVCpu->cpum.GstCtx.esp += cbToAdd;
6769 else
6770 pVCpu->cpum.GstCtx.sp += cbToAdd;
6771}
6772
6773
6774/**
6775 * Subtracts from the stack pointer.
6776 *
6777 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6778 * @param cbToSub The number of bytes to subtract (8-bit!).
6779 */
6780DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6781{
6782 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6783 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6784 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6785 pVCpu->cpum.GstCtx.esp -= cbToSub;
6786 else
6787 pVCpu->cpum.GstCtx.sp -= cbToSub;
6788}
6789
6790
6791/**
6792 * Adds to the temporary stack pointer.
6793 *
6794 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6795 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6796 * @param cbToAdd The number of bytes to add (16-bit).
6797 */
6798DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6799{
6800 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6801 pTmpRsp->u += cbToAdd;
6802 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6803 pTmpRsp->DWords.dw0 += cbToAdd;
6804 else
6805 pTmpRsp->Words.w0 += cbToAdd;
6806}
6807
6808
6809/**
6810 * Subtracts from the temporary stack pointer.
6811 *
6812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6813 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6814 * @param cbToSub The number of bytes to subtract.
6815 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6816 * expecting that.
6817 */
6818DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6819{
6820 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6821 pTmpRsp->u -= cbToSub;
6822 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6823 pTmpRsp->DWords.dw0 -= cbToSub;
6824 else
6825 pTmpRsp->Words.w0 -= cbToSub;
6826}
6827
6828
6829/**
6830 * Calculates the effective stack address for a push of the specified size as
6831 * well as the new RSP value (upper bits may be masked).
6832 *
6833 * @returns Effective stack address for the push.
6834 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6835 * @param cbItem The size of the stack item to push.
6836 * @param puNewRsp Where to return the new RSP value.
6837 */
6838DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6839{
6840 RTUINT64U uTmpRsp;
6841 RTGCPTR GCPtrTop;
6842 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6843
6844 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6845 GCPtrTop = uTmpRsp.u -= cbItem;
6846 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6847 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6848 else
6849 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6850 *puNewRsp = uTmpRsp.u;
6851 return GCPtrTop;
6852}
6853
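/*
 * Example of the calculation above: pushing an 8 byte item in 64-bit mode with
 * RSP=0x7000 returns GCPtrTop=0x6FF8 and sets *puNewRsp=0x6FF8.  With a 16-bit
 * stack segment only the low word is decremented, so the upper bits of the
 * returned RSP value are left untouched.
 */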
6854
6855/**
6856 * Gets the current stack pointer and calculates the value after a pop of the
6857 * specified size.
6858 *
6859 * @returns Current stack pointer.
6860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6861 * @param cbItem The size of the stack item to pop.
6862 * @param puNewRsp Where to return the new RSP value.
6863 */
6864DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6865{
6866 RTUINT64U uTmpRsp;
6867 RTGCPTR GCPtrTop;
6868 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6869
6870 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6871 {
6872 GCPtrTop = uTmpRsp.u;
6873 uTmpRsp.u += cbItem;
6874 }
6875 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6876 {
6877 GCPtrTop = uTmpRsp.DWords.dw0;
6878 uTmpRsp.DWords.dw0 += cbItem;
6879 }
6880 else
6881 {
6882 GCPtrTop = uTmpRsp.Words.w0;
6883 uTmpRsp.Words.w0 += cbItem;
6884 }
6885 *puNewRsp = uTmpRsp.u;
6886 return GCPtrTop;
6887}
6888
6889
6890/**
6891 * Calculates the effective stack address for a push of the specified size as
6892 * well as the new temporary RSP value (upper bits may be masked).
6893 *
6894 * @returns Effective stack address for the push.
6895 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6896 * @param pTmpRsp The temporary stack pointer. This is updated.
6897 * @param cbItem The size of the stack item to push.
6898 */
6899DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6900{
6901 RTGCPTR GCPtrTop;
6902
6903 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6904 GCPtrTop = pTmpRsp->u -= cbItem;
6905 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6906 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6907 else
6908 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6909 return GCPtrTop;
6910}
6911
6912
6913/**
6914 * Gets the effective stack address for a pop of the specified size and
6915 * calculates and updates the temporary RSP.
6916 *
6917 * @returns Current stack pointer.
6918 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6919 * @param pTmpRsp The temporary stack pointer. This is updated.
6920 * @param cbItem The size of the stack item to pop.
6921 */
6922DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6923{
6924 RTGCPTR GCPtrTop;
6925 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6926 {
6927 GCPtrTop = pTmpRsp->u;
6928 pTmpRsp->u += cbItem;
6929 }
6930 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6931 {
6932 GCPtrTop = pTmpRsp->DWords.dw0;
6933 pTmpRsp->DWords.dw0 += cbItem;
6934 }
6935 else
6936 {
6937 GCPtrTop = pTmpRsp->Words.w0;
6938 pTmpRsp->Words.w0 += cbItem;
6939 }
6940 return GCPtrTop;
6941}
6942
6943/** @} */
6944
6945
6946/** @name FPU access and helpers.
6947 *
6948 * @{
6949 */
6950
6951
6952/**
6953 * Hook for preparing to use the host FPU.
6954 *
6955 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6956 *
6957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6958 */
6959DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6960{
6961#ifdef IN_RING3
6962 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6963#else
6964 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6965#endif
6966 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6967}
6968
6969
6970/**
6971 * Hook for preparing to use the host FPU for SSE.
6972 *
6973 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6974 *
6975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6976 */
6977DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6978{
6979 iemFpuPrepareUsage(pVCpu);
6980}
6981
6982
6983/**
6984 * Hook for preparing to use the host FPU for AVX.
6985 *
6986 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6987 *
6988 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6989 */
6990DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
6991{
6992 iemFpuPrepareUsage(pVCpu);
6993}
6994
6995
6996/**
6997 * Hook for actualizing the guest FPU state before the interpreter reads it.
6998 *
6999 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7000 *
7001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7002 */
7003DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7004{
7005#ifdef IN_RING3
7006 NOREF(pVCpu);
7007#else
7008 CPUMRZFpuStateActualizeForRead(pVCpu);
7009#endif
7010 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7011}
7012
7013
7014/**
7015 * Hook for actualizing the guest FPU state before the interpreter changes it.
7016 *
7017 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7018 *
7019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7020 */
7021DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7022{
7023#ifdef IN_RING3
7024 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7025#else
7026 CPUMRZFpuStateActualizeForChange(pVCpu);
7027#endif
7028 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7029}
7030
7031
7032/**
7033 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7034 * only.
7035 *
7036 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7037 *
7038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7039 */
7040DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7041{
7042#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7043 NOREF(pVCpu);
7044#else
7045 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7046#endif
7047 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7048}
7049
7050
7051/**
7052 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7053 * read+write.
7054 *
7055 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7056 *
7057 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7058 */
7059DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7060{
7061#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7062 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7063#else
7064 CPUMRZFpuStateActualizeForChange(pVCpu);
7065#endif
7066 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7067}
7068
7069
7070/**
7071 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7072 * only.
7073 *
7074 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7075 *
7076 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7077 */
7078DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7079{
7080#ifdef IN_RING3
7081 NOREF(pVCpu);
7082#else
7083 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7084#endif
7085 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7086}
7087
7088
7089/**
7090 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7091 * read+write.
7092 *
7093 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7094 *
7095 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7096 */
7097DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7098{
7099#ifdef IN_RING3
7100 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7101#else
7102 CPUMRZFpuStateActualizeForChange(pVCpu);
7103#endif
7104 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7105}
7106
7107
7108/**
7109 * Stores a QNaN value into a FPU register.
7110 *
7111 * @param pReg Pointer to the register.
7112 */
7113DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7114{
7115 pReg->au32[0] = UINT32_C(0x00000000);
7116 pReg->au32[1] = UINT32_C(0xc0000000);
7117 pReg->au16[4] = UINT16_C(0xffff);
7118}
7119
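/*
 * The pattern written by iemFpuStoreQNan is the x87 "QNaN floating-point
 * indefinite": sign + exponent word 0xFFFF and significand 0xC000000000000000
 * (integer bit and quiet bit set, the rest zero), i.e. what hardware produces
 * for masked invalid-operation responses.
 */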
7120
7121/**
7122 * Updates the FOP, FPU.CS and FPUIP registers.
7123 *
7124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7125 * @param pFpuCtx The FPU context.
7126 */
7127DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7128{
7129 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7130 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7131 /** @todo x87.CS and FPUIP need to be kept separately. */
7132 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7133 {
7134 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7135 * happens in real mode here based on the fnsave and fnstenv images. */
7136 pFpuCtx->CS = 0;
7137 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7138 }
7139 else
7140 {
7141 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7142 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7143 }
7144}
7145
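/*
 * Real/V86 mode example for the FPUIP formation above: with CS=0x1234 and
 * EIP=0x0010 the stored FPUIP becomes (0x1234 << 4) | 0x0010 = 0x12350, a
 * linear style address, while the CS field itself is stored as zero (see the
 * todo about keeping the selector separately).
 */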
7146
7147/**
7148 * Updates the x87.DS and FPUDP registers.
7149 *
7150 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7151 * @param pFpuCtx The FPU context.
7152 * @param iEffSeg The effective segment register.
7153 * @param GCPtrEff The effective address relative to @a iEffSeg.
7154 */
7155DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7156{
7157 RTSEL sel;
7158 switch (iEffSeg)
7159 {
7160 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7161 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7162 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7163 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7164 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7165 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7166 default:
7167 AssertMsgFailed(("%d\n", iEffSeg));
7168 sel = pVCpu->cpum.GstCtx.ds.Sel;
7169 }
7170 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7171 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7172 {
7173 pFpuCtx->DS = 0;
7174 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7175 }
7176 else
7177 {
7178 pFpuCtx->DS = sel;
7179 pFpuCtx->FPUDP = GCPtrEff;
7180 }
7181}
7182
7183
7184/**
7185 * Rotates the stack registers in the push direction.
7186 *
7187 * @param pFpuCtx The FPU context.
7188 * @remarks This is a complete waste of time, but fxsave stores the registers in
7189 * stack order.
7190 */
7191DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7192{
7193 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7194 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7195 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7196 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7197 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7198 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7199 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7200 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7201 pFpuCtx->aRegs[0].r80 = r80Tmp;
7202}
7203
7204
7205/**
7206 * Rotates the stack registers in the pop direction.
7207 *
7208 * @param pFpuCtx The FPU context.
7209 * @remarks This is a complete waste of time, but fxsave stores the registers in
7210 * stack order.
7211 */
7212DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7213{
7214 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7215 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7216 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7217 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7218 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7219 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7220 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7221 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7222 pFpuCtx->aRegs[7].r80 = r80Tmp;
7223}
7224
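/*
 * The two rotation helpers above exist because the fxsave image keeps the
 * registers in stack order, i.e. aRegs[0] is always ST(0).  So when TOP moves,
 * e.g. a push turning the old ST(0) into ST(1), the array contents are rotated
 * so each aRegs[i] keeps matching ST(i).
 */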
7225
7226/**
7227 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7228 * exception prevents it.
7229 *
7230 * @param pResult The FPU operation result to push.
7231 * @param pFpuCtx The FPU context.
7232 */
7233IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7234{
7235 /* Update FSW and bail if there are pending exceptions afterwards. */
7236 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7237 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7238 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7239 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7240 {
7241 pFpuCtx->FSW = fFsw;
7242 return;
7243 }
7244
7245 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7246 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7247 {
7248 /* All is fine, push the actual value. */
7249 pFpuCtx->FTW |= RT_BIT(iNewTop);
7250 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7251 }
7252 else if (pFpuCtx->FCW & X86_FCW_IM)
7253 {
7254 /* Masked stack overflow, push QNaN. */
7255 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7256 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7257 }
7258 else
7259 {
7260 /* Raise stack overflow, don't push anything. */
7261 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7262 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7263 return;
7264 }
7265
7266 fFsw &= ~X86_FSW_TOP_MASK;
7267 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7268 pFpuCtx->FSW = fFsw;
7269
7270 iemFpuRotateStackPush(pFpuCtx);
7271}
7272
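/*
 * TOP arithmetic note for the push above: (TOP + 7) & X86_FSW_TOP_SMASK is
 * TOP - 1 modulo 8, so with TOP=0 the pushed value lands in physical register
 * 7 and FSW.TOP becomes 7, just like FLD behaves on real hardware.
 */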
7273
7274/**
7275 * Stores a result in a FPU register and updates the FSW and FTW.
7276 *
7277 * @param pFpuCtx The FPU context.
7278 * @param pResult The result to store.
7279 * @param iStReg Which FPU register to store it in.
7280 */
7281IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7282{
7283 Assert(iStReg < 8);
7284 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7285 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7286 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7287 pFpuCtx->FTW |= RT_BIT(iReg);
7288 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7289}
7290
7291
7292/**
7293 * Only updates the FPU status word (FSW) with the result of the current
7294 * instruction.
7295 *
7296 * @param pFpuCtx The FPU context.
7297 * @param u16FSW The FSW output of the current instruction.
7298 */
7299IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7300{
7301 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7302 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7303}
7304
7305
7306/**
7307 * Pops one item off the FPU stack if no pending exception prevents it.
7308 *
7309 * @param pFpuCtx The FPU context.
7310 */
7311IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7312{
7313 /* Check pending exceptions. */
7314 uint16_t uFSW = pFpuCtx->FSW;
7315 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7316 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7317 return;
7318
7319 /* TOP++ (pop one item). */
7320 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7321 uFSW &= ~X86_FSW_TOP_MASK;
7322 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7323 pFpuCtx->FSW = uFSW;
7324
7325 /* Mark the previous ST0 as empty. */
7326 iOldTop >>= X86_FSW_TOP_SHIFT;
7327 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7328
7329 /* Rotate the registers. */
7330 iemFpuRotateStackPop(pFpuCtx);
7331}
7332
7333
7334/**
7335 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7336 *
7337 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7338 * @param pResult The FPU operation result to push.
7339 */
7340IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7341{
7342 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7343 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7344 iemFpuMaybePushResult(pResult, pFpuCtx);
7345}
7346
7347
7348/**
7349 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7350 * and sets FPUDP and FPUDS.
7351 *
7352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7353 * @param pResult The FPU operation result to push.
7354 * @param iEffSeg The effective segment register.
7355 * @param GCPtrEff The effective address relative to @a iEffSeg.
7356 */
7357IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7358{
7359 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7360 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7361 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7362 iemFpuMaybePushResult(pResult, pFpuCtx);
7363}
7364
7365
7366/**
7367 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7368 * unless a pending exception prevents it.
7369 *
7370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7371 * @param pResult The FPU operation result to store and push.
7372 */
7373IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7374{
7375 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7376 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7377
7378 /* Update FSW and bail if there are pending exceptions afterwards. */
7379 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7380 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7381 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7382 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7383 {
7384 pFpuCtx->FSW = fFsw;
7385 return;
7386 }
7387
7388 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7389 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7390 {
7391 /* All is fine, push the actual value. */
7392 pFpuCtx->FTW |= RT_BIT(iNewTop);
7393 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7394 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7395 }
7396 else if (pFpuCtx->FCW & X86_FCW_IM)
7397 {
7398 /* Masked stack overflow, push QNaN. */
7399 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7400 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7401 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7402 }
7403 else
7404 {
7405 /* Raise stack overflow, don't push anything. */
7406 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7407 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7408 return;
7409 }
7410
7411 fFsw &= ~X86_FSW_TOP_MASK;
7412 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7413 pFpuCtx->FSW = fFsw;
7414
7415 iemFpuRotateStackPush(pFpuCtx);
7416}
7417
7418
7419/**
7420 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7421 * FOP.
7422 *
7423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7424 * @param pResult The result to store.
7425 * @param iStReg Which FPU register to store it in.
7426 */
7427IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7428{
7429 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7430 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7431 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7432}
7433
7434
7435/**
7436 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7437 * FOP, and then pops the stack.
7438 *
7439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7440 * @param pResult The result to store.
7441 * @param iStReg Which FPU register to store it in.
7442 */
7443IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7444{
7445 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7446 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7447 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7448 iemFpuMaybePopOne(pFpuCtx);
7449}
7450
7451
7452/**
7453 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7454 * FPUDP, and FPUDS.
7455 *
7456 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7457 * @param pResult The result to store.
7458 * @param iStReg Which FPU register to store it in.
7459 * @param iEffSeg The effective memory operand selector register.
7460 * @param GCPtrEff The effective memory operand offset.
7461 */
7462IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7463 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7464{
7465 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7466 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7467 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7468 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7469}
7470
7471
7472/**
7473 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7474 * FPUDP, and FPUDS, and then pops the stack.
7475 *
7476 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7477 * @param pResult The result to store.
7478 * @param iStReg Which FPU register to store it in.
7479 * @param iEffSeg The effective memory operand selector register.
7480 * @param GCPtrEff The effective memory operand offset.
7481 */
7482IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7483 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7484{
7485 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7486 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7487 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7488 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7489 iemFpuMaybePopOne(pFpuCtx);
7490}
7491
7492
7493/**
7494 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7495 *
7496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7497 */
7498IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7499{
7500 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7501 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7502}
7503
7504
7505/**
7506 * Marks the specified stack register as free (for FFREE).
7507 *
7508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7509 * @param iStReg The register to free.
7510 */
7511IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7512{
7513 Assert(iStReg < 8);
7514 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7515 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7516 pFpuCtx->FTW &= ~RT_BIT(iReg);
7517}
7518
7519
7520/**
7521 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7522 *
7523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7524 */
7525IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7526{
7527 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7528 uint16_t uFsw = pFpuCtx->FSW;
7529 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7530 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7531 uFsw &= ~X86_FSW_TOP_MASK;
7532 uFsw |= uTop;
7533 pFpuCtx->FSW = uFsw;
7534}
7535
7536
7537/**
7538 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7539 *
7540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7541 */
7542IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7543{
7544 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7545 uint16_t uFsw = pFpuCtx->FSW;
7546 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7547 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7548 uFsw &= ~X86_FSW_TOP_MASK;
7549 uFsw |= uTop;
7550 pFpuCtx->FSW = uFsw;
7551}
7552
7553
7554/**
7555 * Updates the FSW, FOP, FPUIP, and FPUCS.
7556 *
7557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7558 * @param u16FSW The FSW from the current instruction.
7559 */
7560IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7561{
7562 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7563 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7564 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7565}
7566
7567
7568/**
7569 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7570 *
7571 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7572 * @param u16FSW The FSW from the current instruction.
7573 */
7574IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7575{
7576 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7577 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7578 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7579 iemFpuMaybePopOne(pFpuCtx);
7580}
7581
7582
7583/**
7584 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7585 *
7586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7587 * @param u16FSW The FSW from the current instruction.
7588 * @param iEffSeg The effective memory operand selector register.
7589 * @param GCPtrEff The effective memory operand offset.
7590 */
7591IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7592{
7593 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7594 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7595 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7596 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7597}
7598
7599
7600/**
7601 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7602 *
7603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7604 * @param u16FSW The FSW from the current instruction.
7605 */
7606IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7607{
7608 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7609 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7610 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7611 iemFpuMaybePopOne(pFpuCtx);
7612 iemFpuMaybePopOne(pFpuCtx);
7613}
7614
7615
7616/**
7617 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7618 *
7619 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7620 * @param u16FSW The FSW from the current instruction.
7621 * @param iEffSeg The effective memory operand selector register.
7622 * @param GCPtrEff The effective memory operand offset.
7623 */
7624IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7625{
7626 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7627 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7628 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7629 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7630 iemFpuMaybePopOne(pFpuCtx);
7631}
7632
7633
7634/**
7635 * Worker routine for raising an FPU stack underflow exception.
7636 *
7637 * @param pFpuCtx The FPU context.
7638 * @param iStReg The stack register being accessed.
7639 */
7640IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7641{
7642 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7643 if (pFpuCtx->FCW & X86_FCW_IM)
7644 {
7645 /* Masked underflow. */
7646 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7647 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7648 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7649 if (iStReg != UINT8_MAX)
7650 {
7651 pFpuCtx->FTW |= RT_BIT(iReg);
7652 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7653 }
7654 }
7655 else
7656 {
7657 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7658 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7659 }
7660}
7661
7662
7663/**
7664 * Raises a FPU stack underflow exception.
7665 *
7666 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7667 * @param iStReg The destination register that should be loaded
7668 * with QNaN if \#IS is not masked. Specify
7669 * UINT8_MAX if none (like for fcom).
7670 */
7671DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7672{
7673 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7674 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7675 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7676}
7677
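/*
 * Rough usage sketch (instruction workers normally reach this through the
 * IEM_MC_* layer rather than calling it directly):
 *
 *      if (iemFpuStRegNotEmpty(pVCpu, iStReg) != VINF_SUCCESS)
 *          iemFpuStackUnderflow(pVCpu, iStReg);  // QNaN in iStReg if #IS masked
 *
 * With #IS unmasked the exception status bits (IE/SF/ES/B) are set instead and
 * the register stack is left untouched.
 */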
7678
7679DECL_NO_INLINE(IEM_STATIC, void)
7680iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7681{
7682 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7683 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7684 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7685 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7686}
7687
7688
7689DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7690{
7691 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7692 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7693 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7694 iemFpuMaybePopOne(pFpuCtx);
7695}
7696
7697
7698DECL_NO_INLINE(IEM_STATIC, void)
7699iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7700{
7701 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7702 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7703 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7704 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7705 iemFpuMaybePopOne(pFpuCtx);
7706}
7707
7708
7709DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7710{
7711 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7712 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7713 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7714 iemFpuMaybePopOne(pFpuCtx);
7715 iemFpuMaybePopOne(pFpuCtx);
7716}
7717
7718
7719DECL_NO_INLINE(IEM_STATIC, void)
7720iemFpuStackPushUnderflow(PVMCPU pVCpu)
7721{
7722 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7723 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7724
7725 if (pFpuCtx->FCW & X86_FCW_IM)
7726 {
7727 /* Masked underflow - Push QNaN. */
7728 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7729 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7730 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7731 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7732 pFpuCtx->FTW |= RT_BIT(iNewTop);
7733 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7734 iemFpuRotateStackPush(pFpuCtx);
7735 }
7736 else
7737 {
7738 /* Exception pending - don't change TOP or the register stack. */
7739 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7740 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7741 }
7742}
7743
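/*
 * The masked path above computes the new TOP as (TOP + 7) & 7, which is just
 * a decrement modulo 8: a push moves TOP down one slot and wraps from 0 to 7.
 * A tiny self-contained sketch with concrete values (plain C, no VBox types):
 *
 * @code
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *       // (iTop + 7) & 7 is iTop - 1 modulo 8.
 *       for (unsigned iTop = 0; iTop < 8; iTop++)
 *           printf("TOP=%u -> new TOP=%u\n", iTop, (iTop + 7) & 7);
 *       return 0;  // Prints 0->7, 1->0, 2->1, ..., 7->6.
 *   }
 * @endcode
 */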
7744
7745DECL_NO_INLINE(IEM_STATIC, void)
7746iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7747{
7748 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7749 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7750
7751 if (pFpuCtx->FCW & X86_FCW_IM)
7752 {
7753 /* Masked underflow - Push QNaN. */
7754 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7755 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7756 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7757 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7758 pFpuCtx->FTW |= RT_BIT(iNewTop);
7759 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7760 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7761 iemFpuRotateStackPush(pFpuCtx);
7762 }
7763 else
7764 {
7765 /* Exception pending - don't change TOP or the register stack. */
7766 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7767 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7768 }
7769}
7770
7771
7772/**
7773 * Worker routine for raising an FPU stack overflow exception on a push.
7774 *
7775 * @param pFpuCtx The FPU context.
7776 */
7777IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7778{
7779 if (pFpuCtx->FCW & X86_FCW_IM)
7780 {
7781 /* Masked overflow. */
7782 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7783 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7784 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7785 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7786 pFpuCtx->FTW |= RT_BIT(iNewTop);
7787 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7788 iemFpuRotateStackPush(pFpuCtx);
7789 }
7790 else
7791 {
7792 /* Exception pending - don't change TOP or the register stack. */
7793 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7794 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7795 }
7796}
7797
7798
7799/**
7800 * Raises an FPU stack overflow exception on a push.
7801 *
7802 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7803 */
7804DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7805{
7806 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7807 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7808 iemFpuStackPushOverflowOnly(pFpuCtx);
7809}
7810
7811
7812/**
7813 * Raises an FPU stack overflow exception on a push with a memory operand.
7814 *
7815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7816 * @param iEffSeg The effective memory operand selector register.
7817 * @param GCPtrEff The effective memory operand offset.
7818 */
7819DECL_NO_INLINE(IEM_STATIC, void)
7820iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7821{
7822 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7823 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7824 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7825 iemFpuStackPushOverflowOnly(pFpuCtx);
7826}
7827
7828
7829IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7830{
7831 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7832 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7833 if (pFpuCtx->FTW & RT_BIT(iReg))
7834 return VINF_SUCCESS;
7835 return VERR_NOT_FOUND;
7836}
7837
7838
7839IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7840{
7841 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7842 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7843 if (pFpuCtx->FTW & RT_BIT(iReg))
7844 {
7845 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7846 return VINF_SUCCESS;
7847 }
7848 return VERR_NOT_FOUND;
7849}
7850
7851
7852IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7853 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7854{
7855 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7856 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7857 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7858 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7859 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7860 {
7861 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7862 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7863 return VINF_SUCCESS;
7864 }
7865 return VERR_NOT_FOUND;
7866}
7867
7868
7869IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7870{
7871 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7872 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7873 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7874 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7875 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7876 {
7877 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7878 return VINF_SUCCESS;
7879 }
7880 return VERR_NOT_FOUND;
7881}
7882
7883
7884/**
7885 * Updates the FPU exception status after FCW is changed.
7886 *
7887 * @param pFpuCtx The FPU context.
7888 */
7889IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7890{
7891 uint16_t u16Fsw = pFpuCtx->FSW;
7892 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7893 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7894 else
7895 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7896 pFpuCtx->FSW = u16Fsw;
7897}
7898
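/*
 * In other words: FSW.ES (and B) is set whenever at least one of the six
 * exception flags is pending in FSW while the corresponding FCW mask bit is
 * clear.  A self-contained sketch of that predicate; the 0x3f constant stands
 * in for the six x87 exception bits (X86_FSW_XCPT_MASK / X86_FCW_XCPT_MASK)
 * and is a simplifying assumption of this sketch:
 *
 * @code
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   // True when an unmasked x87 exception is pending, i.e. ES/B should be set.
 *   static bool sketchFpuEsPending(uint16_t fsw, uint16_t fcw)
 *   {
 *       return ((fsw & 0x3f) & ~(fcw & 0x3f)) != 0;
 *   }
 *   // Example: FSW.IE set with FCW.IM clear -> pending; with FCW.IM set -> not.
 * @endcode
 */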
7899
7900/**
7901 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7902 *
7903 * @returns The full FTW.
7904 * @param pFpuCtx The FPU context.
7905 */
7906IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7907{
7908 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7909 uint16_t u16Ftw = 0;
7910 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7911 for (unsigned iSt = 0; iSt < 8; iSt++)
7912 {
7913 unsigned const iReg = (iSt + iTop) & 7;
7914 if (!(u8Ftw & RT_BIT(iReg)))
7915 u16Ftw |= 3 << (iReg * 2); /* empty */
7916 else
7917 {
7918 uint16_t uTag;
7919 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7920 if (pr80Reg->s.uExponent == 0x7fff)
7921 uTag = 2; /* Exponent is all 1's => Special. */
7922 else if (pr80Reg->s.uExponent == 0x0000)
7923 {
7924 if (pr80Reg->s.u64Mantissa == 0x0000)
7925 uTag = 1; /* All bits are zero => Zero. */
7926 else
7927 uTag = 2; /* Must be special. */
7928 }
7929 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7930 uTag = 0; /* Valid. */
7931 else
7932 uTag = 2; /* Must be special. */
7933
7934 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7935 }
7936 }
7937
7938 return u16Ftw;
7939}
7940
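/*
 * The tag classification above follows the architectural rules for the full
 * (FSTENV/FSAVE style) tag word: 0=valid, 1=zero, 2=special, 3=empty.  A
 * self-contained restatement, with the abridged-tag bit passed in explicitly
 * (names are illustrative only):
 *
 * @code
 *   #include <stdint.h>
 *
 *   // Returns the 2-bit tag for one register, mirroring the loop above.
 *   static unsigned sketchCalcTag(int fNotEmpty, uint16_t uExponent, uint64_t u64Mantissa)
 *   {
 *       if (!fNotEmpty)
 *           return 3;                            // Empty.
 *       if (uExponent == 0x7fff)
 *           return 2;                            // All-ones exponent => special.
 *       if (uExponent == 0)
 *           return u64Mantissa == 0 ? 1 : 2;     // Zero, or denormal => special.
 *       return (u64Mantissa >> 63) ? 0 : 2;      // J bit set => valid, else special.
 *   }
 * @endcode
 */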
7941
7942/**
7943 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7944 *
7945 * @returns The compressed FTW.
7946 * @param u16FullFtw The full FTW to convert.
7947 */
7948IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7949{
7950 uint8_t u8Ftw = 0;
7951 for (unsigned i = 0; i < 8; i++)
7952 {
7953 if ((u16FullFtw & 3) != 3 /*empty*/)
7954 u8Ftw |= RT_BIT(i);
7955 u16FullFtw >>= 2;
7956 }
7957
7958 return u8Ftw;
7959}
7960
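/*
 * A worked example of the compression above: every 2-bit tag other than 11b
 * (empty) becomes a set bit in the abridged, FXSAVE-style tag byte.  Minimal
 * self-contained sketch with a few sample inputs:
 *
 * @code
 *   #include <assert.h>
 *   #include <stdint.h>
 *
 *   static uint8_t sketchCompressFtw(uint16_t u16FullFtw)
 *   {
 *       uint8_t u8Ftw = 0;
 *       for (unsigned i = 0; i < 8; i++, u16FullFtw >>= 2)
 *           if ((u16FullFtw & 3) != 3)           // anything but 'empty'
 *               u8Ftw |= (uint8_t)(1 << i);
 *       return u8Ftw;
 *   }
 *
 *   int main(void)
 *   {
 *       assert(sketchCompressFtw(0xffff) == 0x00);   // all registers empty
 *       assert(sketchCompressFtw(0xfff0) == 0x03);   // registers 0 and 1 occupied
 *       assert(sketchCompressFtw(0x0000) == 0xff);   // all registers valid
 *       return 0;
 *   }
 * @endcode
 */
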
7961/** @} */
7962
7963
7964/** @name Memory access.
7965 *
7966 * @{
7967 */
7968
7969
7970/**
7971 * Updates the IEMCPU::cbWritten counter if applicable.
7972 *
7973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7974 * @param fAccess The access being accounted for.
7975 * @param cbMem The access size.
7976 */
7977DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7978{
7979 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7980 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7981 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7982}
7983
7984
7985/**
7986 * Checks if the given segment can be written to, raising the appropriate
7987 * exception if not.
7988 *
7989 * @returns VBox strict status code.
7990 *
7991 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7992 * @param pHid Pointer to the hidden register.
7993 * @param iSegReg The register number.
7994 * @param pu64BaseAddr Where to return the base address to use for the
7995 * segment. (In 64-bit code it may differ from the
7996 * base in the hidden segment.)
7997 */
7998IEM_STATIC VBOXSTRICTRC
7999iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8000{
8001 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8002
8003 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8004 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8005 else
8006 {
8007 if (!pHid->Attr.n.u1Present)
8008 {
8009 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8010 AssertRelease(uSel == 0);
8011 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8012 return iemRaiseGeneralProtectionFault0(pVCpu);
8013 }
8014
8015 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8016 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8017 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8018 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8019 *pu64BaseAddr = pHid->u64Base;
8020 }
8021 return VINF_SUCCESS;
8022}
8023
8024
8025/**
8026 * Checks if the given segment can be read from, raising the appropriate
8027 * exception if not.
8028 *
8029 * @returns VBox strict status code.
8030 *
8031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8032 * @param pHid Pointer to the hidden register.
8033 * @param iSegReg The register number.
8034 * @param pu64BaseAddr Where to return the base address to use for the
8035 * segment. (In 64-bit code it may differ from the
8036 * base in the hidden segment.)
8037 */
8038IEM_STATIC VBOXSTRICTRC
8039iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8040{
8041 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8042
8043 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8044 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8045 else
8046 {
8047 if (!pHid->Attr.n.u1Present)
8048 {
8049 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8050 AssertRelease(uSel == 0);
8051 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8052 return iemRaiseGeneralProtectionFault0(pVCpu);
8053 }
8054
8055 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8056 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8057 *pu64BaseAddr = pHid->u64Base;
8058 }
8059 return VINF_SUCCESS;
8060}
8061
8062
8063/**
8064 * Applies the segment limit, base and attributes.
8065 *
8066 * This may raise a \#GP or \#SS.
8067 *
8068 * @returns VBox strict status code.
8069 *
8070 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8071 * @param fAccess The kind of access which is being performed.
8072 * @param iSegReg The index of the segment register to apply.
8073 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8074 * TSS, ++).
8075 * @param cbMem The access size.
8076 * @param pGCPtrMem Pointer to the guest memory address to apply
8077 * segmentation to. Input and output parameter.
8078 */
8079IEM_STATIC VBOXSTRICTRC
8080iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8081{
8082 if (iSegReg == UINT8_MAX)
8083 return VINF_SUCCESS;
8084
8085 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8086 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8087 switch (pVCpu->iem.s.enmCpuMode)
8088 {
8089 case IEMMODE_16BIT:
8090 case IEMMODE_32BIT:
8091 {
8092 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8093 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8094
8095 if ( pSel->Attr.n.u1Present
8096 && !pSel->Attr.n.u1Unusable)
8097 {
8098 Assert(pSel->Attr.n.u1DescType);
8099 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8100 {
8101 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8102 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8103 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8104
8105 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8106 {
8107 /** @todo CPL check. */
8108 }
8109
8110 /*
8111 * There are two kinds of data selectors, normal and expand down.
8112 */
8113 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8114 {
8115 if ( GCPtrFirst32 > pSel->u32Limit
8116 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8117 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8118 }
8119 else
8120 {
8121 /*
8122 * The upper boundary is defined by the B bit, not the G bit!
8123 */
8124 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8125 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8126 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8127 }
8128 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8129 }
8130 else
8131 {
8132
8133 /*
8134 * A code selector can usually be used for reading; writing is
8135 * only permitted in real and V8086 mode.
8136 */
8137 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8138 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8139 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8140 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8141 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8142
8143 if ( GCPtrFirst32 > pSel->u32Limit
8144 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8145 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8146
8147 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8148 {
8149 /** @todo CPL check. */
8150 }
8151
8152 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8153 }
8154 }
8155 else
8156 return iemRaiseGeneralProtectionFault0(pVCpu);
8157 return VINF_SUCCESS;
8158 }
8159
8160 case IEMMODE_64BIT:
8161 {
8162 RTGCPTR GCPtrMem = *pGCPtrMem;
8163 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8164 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8165
8166 Assert(cbMem >= 1);
8167 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8168 return VINF_SUCCESS;
8169 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8170 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8171 return iemRaiseGeneralProtectionFault0(pVCpu);
8172 }
8173
8174 default:
8175 AssertFailedReturn(VERR_IEM_IPE_7);
8176 }
8177}
8178
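/*
 * The 16/32-bit limit checks above can be summarised as: expand-up segments
 * accept offsets in [0, limit], expand-down segments accept (limit, 0xffff]
 * or (limit, 0xffffffff] depending on the D/B bit.  A self-contained sketch
 * of that rule (no selector attributes or fault raising, just the range test):
 *
 * @code
 *   #include <stdbool.h>
 *   #include <stdint.h>
 *
 *   static bool sketchSegRangeOk(uint32_t off, uint32_t cb, uint32_t u32Limit,
 *                                bool fExpandDown, bool fDefBig)
 *   {
 *       uint32_t const offLast = off + cb - 1;
 *       if (!fExpandDown)
 *           return off <= u32Limit && offLast <= u32Limit;
 *       uint32_t const offMax = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
 *       return off > u32Limit && offLast <= offMax;
 *   }
 *   // Example: a 4-byte access at offset 0x1000 with limit 0x0fff faults on an
 *   // expand-up segment but is fine on an expand-down one with D/B=1.
 * @endcode
 */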
8179
8180/**
8181 * Translates a virtual address to a physical address and checks whether we
8182 * can access the page as specified.
8183 *
8184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8185 * @param GCPtrMem The virtual address.
8186 * @param fAccess The intended access.
8187 * @param pGCPhysMem Where to return the physical address.
8188 */
8189IEM_STATIC VBOXSTRICTRC
8190iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8191{
8192 /** @todo Need a different PGM interface here. We're currently using
8193 * generic / REM interfaces. this won't cut it for R0 & RC. */
8194 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8195 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8196 RTGCPHYS GCPhys;
8197 uint64_t fFlags;
8198 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8199 if (RT_FAILURE(rc))
8200 {
8201 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8202 /** @todo Check unassigned memory in unpaged mode. */
8203 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8204 *pGCPhysMem = NIL_RTGCPHYS;
8205 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8206 }
8207
8208 /* If the page is writable and does not have the no-exec bit set, all
8209 access is allowed. Otherwise we'll have to check more carefully... */
8210 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8211 {
8212 /* Write to read only memory? */
8213 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8214 && !(fFlags & X86_PTE_RW)
8215 && ( (pVCpu->iem.s.uCpl == 3
8216 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8217 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8218 {
8219 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8220 *pGCPhysMem = NIL_RTGCPHYS;
8221 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8222 }
8223
8224 /* Kernel memory accessed by userland? */
8225 if ( !(fFlags & X86_PTE_US)
8226 && pVCpu->iem.s.uCpl == 3
8227 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8228 {
8229 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8230 *pGCPhysMem = NIL_RTGCPHYS;
8231 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8232 }
8233
8234 /* Executing non-executable memory? */
8235 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8236 && (fFlags & X86_PTE_PAE_NX)
8237 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8238 {
8239 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8240 *pGCPhysMem = NIL_RTGCPHYS;
8241 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8242 VERR_ACCESS_DENIED);
8243 }
8244 }
8245
8246 /*
8247 * Set the dirty / access flags.
8248 * ASSUMES this is set when the address is translated rather than on commit...
8249 */
8250 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8251 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8252 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8253 {
8254 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8255 AssertRC(rc2);
8256 }
8257
8258 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8259 *pGCPhysMem = GCPhys;
8260 return VINF_SUCCESS;
8261}
8262
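/*
 * The page-level permission logic above boils down to three #PF conditions.
 * A self-contained sketch (the booleans stand in for the X86_PTE_RW/US/PAE_NX
 * bits, CR0.WP and EFER.NXE; fUserAccess means CPL==3 and the access is not
 * flagged IEM_ACCESS_WHAT_SYS):
 *
 * @code
 *   #include <stdbool.h>
 *
 *   static bool sketchPageAccessFaults(bool fWrite, bool fExec, bool fUserAccess,
 *                                      bool fPteWritable, bool fPteUser, bool fPteNx,
 *                                      bool fCr0Wp, bool fEferNxe)
 *   {
 *       if (fWrite && !fPteWritable && (fUserAccess || fCr0Wp))
 *           return true;   // write to a read-only page
 *       if (fUserAccess && !fPteUser)
 *           return true;   // user-mode access to a supervisor page
 *       if (fExec && fPteNx && fEferNxe)
 *           return true;   // instruction fetch from a no-execute page
 *       return false;
 *   }
 * @endcode
 */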
8263
8264
8265/**
8266 * Maps a physical page.
8267 *
8268 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8270 * @param GCPhysMem The physical address.
8271 * @param fAccess The intended access.
8272 * @param ppvMem Where to return the mapping address.
8273 * @param pLock The PGM lock.
8274 */
8275IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8276{
8277#ifdef IEM_LOG_MEMORY_WRITES
8278 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8279 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8280#endif
8281
8282 /** @todo This API may require some improving later. A private deal with PGM
8283 * regarding locking and unlocking needs to be struck. A couple of TLBs
8284 * living in PGM, but with publicly accessible inlined access methods
8285 * could perhaps be an even better solution. */
8286 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8287 GCPhysMem,
8288 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8289 pVCpu->iem.s.fBypassHandlers,
8290 ppvMem,
8291 pLock);
8292 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8293 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8294
8295 return rc;
8296}
8297
8298
8299/**
8300 * Unmap a page previously mapped by iemMemPageMap.
8301 *
8302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8303 * @param GCPhysMem The physical address.
8304 * @param fAccess The intended access.
8305 * @param pvMem What iemMemPageMap returned.
8306 * @param pLock The PGM lock.
8307 */
8308DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8309{
8310 NOREF(pVCpu);
8311 NOREF(GCPhysMem);
8312 NOREF(fAccess);
8313 NOREF(pvMem);
8314 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8315}
8316
8317
8318/**
8319 * Looks up a memory mapping entry.
8320 *
8321 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8322 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8323 * @param pvMem The memory address.
8324 * @param fAccess The kind of access.
8325 */
8326DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8327{
8328 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8329 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8330 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8331 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8332 return 0;
8333 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8334 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8335 return 1;
8336 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8337 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8338 return 2;
8339 return VERR_NOT_FOUND;
8340}
8341
8342
8343/**
8344 * Finds a free memmap entry when using iNextMapping doesn't work.
8345 *
8346 * @returns Memory mapping index, 1024 on failure.
8347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8348 */
8349IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8350{
8351 /*
8352 * The easy case.
8353 */
8354 if (pVCpu->iem.s.cActiveMappings == 0)
8355 {
8356 pVCpu->iem.s.iNextMapping = 1;
8357 return 0;
8358 }
8359
8360 /* There should be enough mappings for all instructions. */
8361 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8362
8363 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8364 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8365 return i;
8366
8367 AssertFailedReturn(1024);
8368}
8369
8370
8371/**
8372 * Commits a bounce buffer that needs writing back and unmaps it.
8373 *
8374 * @returns Strict VBox status code.
8375 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8376 * @param iMemMap The index of the buffer to commit.
8377 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8378 * Always false in ring-3, obviously.
8379 */
8380IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8381{
8382 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8383 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8384#ifdef IN_RING3
8385 Assert(!fPostponeFail);
8386 RT_NOREF_PV(fPostponeFail);
8387#endif
8388
8389 /*
8390 * Do the writing.
8391 */
8392 PVM pVM = pVCpu->CTX_SUFF(pVM);
8393 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8394 {
8395 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8396 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8397 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8398 if (!pVCpu->iem.s.fBypassHandlers)
8399 {
8400 /*
8401 * Carefully and efficiently dealing with access handler return
8402 * codes makes this a little bloated.
8403 */
8404 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8405 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8406 pbBuf,
8407 cbFirst,
8408 PGMACCESSORIGIN_IEM);
8409 if (rcStrict == VINF_SUCCESS)
8410 {
8411 if (cbSecond)
8412 {
8413 rcStrict = PGMPhysWrite(pVM,
8414 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8415 pbBuf + cbFirst,
8416 cbSecond,
8417 PGMACCESSORIGIN_IEM);
8418 if (rcStrict == VINF_SUCCESS)
8419 { /* nothing */ }
8420 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8421 {
8422 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8423 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8425 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8426 }
8427#ifndef IN_RING3
8428 else if (fPostponeFail)
8429 {
8430 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8431 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8432 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8433 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8434 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8435 return iemSetPassUpStatus(pVCpu, rcStrict);
8436 }
8437#endif
8438 else
8439 {
8440 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8441 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8442 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8443 return rcStrict;
8444 }
8445 }
8446 }
8447 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8448 {
8449 if (!cbSecond)
8450 {
8451 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8452 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8453 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8454 }
8455 else
8456 {
8457 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8458 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8459 pbBuf + cbFirst,
8460 cbSecond,
8461 PGMACCESSORIGIN_IEM);
8462 if (rcStrict2 == VINF_SUCCESS)
8463 {
8464 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8465 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8466 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8467 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8468 }
8469 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8470 {
8471 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8472 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8473 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8474 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8475 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8476 }
8477#ifndef IN_RING3
8478 else if (fPostponeFail)
8479 {
8480 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8481 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8482 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8483 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8484 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8485 return iemSetPassUpStatus(pVCpu, rcStrict);
8486 }
8487#endif
8488 else
8489 {
8490 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8491 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8492 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8493 return rcStrict2;
8494 }
8495 }
8496 }
8497#ifndef IN_RING3
8498 else if (fPostponeFail)
8499 {
8500 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8501 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8502 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8503 if (!cbSecond)
8504 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8505 else
8506 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8507 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8508 return iemSetPassUpStatus(pVCpu, rcStrict);
8509 }
8510#endif
8511 else
8512 {
8513 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8514 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8515 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8516 return rcStrict;
8517 }
8518 }
8519 else
8520 {
8521 /*
8522 * No access handlers, much simpler.
8523 */
8524 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8525 if (RT_SUCCESS(rc))
8526 {
8527 if (cbSecond)
8528 {
8529 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8530 if (RT_SUCCESS(rc))
8531 { /* likely */ }
8532 else
8533 {
8534 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8535 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8536 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8537 return rc;
8538 }
8539 }
8540 }
8541 else
8542 {
8543 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8544 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8545 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8546 return rc;
8547 }
8548 }
8549 }
8550
8551#if defined(IEM_LOG_MEMORY_WRITES)
8552 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8553 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8554 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8555 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8556 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8557 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8558
8559 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8560 g_cbIemWrote = cbWrote;
8561 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8562#endif
8563
8564 /*
8565 * Free the mapping entry.
8566 */
8567 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8568 Assert(pVCpu->iem.s.cActiveMappings != 0);
8569 pVCpu->iem.s.cActiveMappings--;
8570 return VINF_SUCCESS;
8571}
8572
8573
8574/**
8575 * iemMemMap worker that deals with a request crossing pages.
8576 */
8577IEM_STATIC VBOXSTRICTRC
8578iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8579{
8580 /*
8581 * Do the address translations.
8582 */
8583 RTGCPHYS GCPhysFirst;
8584 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8585 if (rcStrict != VINF_SUCCESS)
8586 return rcStrict;
8587
8588 RTGCPHYS GCPhysSecond;
8589 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8590 fAccess, &GCPhysSecond);
8591 if (rcStrict != VINF_SUCCESS)
8592 return rcStrict;
8593 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8594
8595 PVM pVM = pVCpu->CTX_SUFF(pVM);
8596
8597 /*
8598 * Read in the current memory content if it's a read, execute or partial
8599 * write access.
8600 */
8601 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8602 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8603 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8604
8605 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8606 {
8607 if (!pVCpu->iem.s.fBypassHandlers)
8608 {
8609 /*
8610 * Must carefully deal with access handler status codes here,
8611 * which makes the code a bit bloated.
8612 */
8613 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8614 if (rcStrict == VINF_SUCCESS)
8615 {
8616 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8617 if (rcStrict == VINF_SUCCESS)
8618 { /*likely */ }
8619 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8620 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8621 else
8622 {
8623 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8624 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8625 return rcStrict;
8626 }
8627 }
8628 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8629 {
8630 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8631 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8632 {
8633 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8634 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8635 }
8636 else
8637 {
8638 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8639 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8640 return rcStrict2;
8641 }
8642 }
8643 else
8644 {
8645 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8646 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8647 return rcStrict;
8648 }
8649 }
8650 else
8651 {
8652 /*
8653 * No informational status codes here, much more straightforward.
8654 */
8655 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8656 if (RT_SUCCESS(rc))
8657 {
8658 Assert(rc == VINF_SUCCESS);
8659 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8660 if (RT_SUCCESS(rc))
8661 Assert(rc == VINF_SUCCESS);
8662 else
8663 {
8664 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8665 return rc;
8666 }
8667 }
8668 else
8669 {
8670 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8671 return rc;
8672 }
8673 }
8674 }
8675#ifdef VBOX_STRICT
8676 else
8677 memset(pbBuf, 0xcc, cbMem);
8678 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8679 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8680#endif
8681
8682 /*
8683 * Commit the bounce buffer entry.
8684 */
8685 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8686 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8687 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8688 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8689 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8690 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8691 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8692 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8693 pVCpu->iem.s.cActiveMappings++;
8694
8695 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8696 *ppvMem = pbBuf;
8697 return VINF_SUCCESS;
8698}
8699
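/*
 * The split arithmetic above is worth spelling out: the first chunk runs from
 * the access address to the end of its page, the remainder lands at the start
 * of the (physically unrelated) second page.  A self-contained sketch with a
 * hypothetical guest address (the 0x1000 page size matches PAGE_SIZE on x86):
 *
 * @code
 *   #include <assert.h>
 *   #include <stddef.h>
 *   #include <stdint.h>
 *
 *   int main(void)
 *   {
 *       uint64_t const GCPtrFirst = UINT64_C(0x00401ffe);   // hypothetical address
 *       size_t   const cbMem      = 8;
 *       uint32_t const cbFirst    = 0x1000 - (uint32_t)(GCPtrFirst & 0xfff);
 *       uint32_t const cbSecond   = (uint32_t)(cbMem - cbFirst);
 *       assert(cbFirst == 2 && cbSecond == 6);   // 2 bytes in page 1, 6 in page 2
 *       return 0;
 *   }
 * @endcode
 */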
8700
8701/**
8702 * iemMemMap worker that deals with iemMemPageMap failures.
8703 */
8704IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8705 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8706{
8707 /*
8708 * Filter out conditions we can handle and the ones which shouldn't happen.
8709 */
8710 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8711 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8712 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8713 {
8714 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8715 return rcMap;
8716 }
8717 pVCpu->iem.s.cPotentialExits++;
8718
8719 /*
8720 * Read in the current memory content if it's a read, execute or partial
8721 * write access.
8722 */
8723 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8724 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8725 {
8726 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8727 memset(pbBuf, 0xff, cbMem);
8728 else
8729 {
8730 int rc;
8731 if (!pVCpu->iem.s.fBypassHandlers)
8732 {
8733 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8734 if (rcStrict == VINF_SUCCESS)
8735 { /* nothing */ }
8736 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8737 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8738 else
8739 {
8740 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8741 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8742 return rcStrict;
8743 }
8744 }
8745 else
8746 {
8747 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8748 if (RT_SUCCESS(rc))
8749 { /* likely */ }
8750 else
8751 {
8752 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8753 GCPhysFirst, rc));
8754 return rc;
8755 }
8756 }
8757 }
8758 }
8759#ifdef VBOX_STRICT
8760 else
8761 memset(pbBuf, 0xcc, cbMem);
8762#endif
8763#ifdef VBOX_STRICT
8764 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8765 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8766#endif
8767
8768 /*
8769 * Commit the bounce buffer entry.
8770 */
8771 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8772 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8773 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8774 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8775 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8776 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8777 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8778 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8779 pVCpu->iem.s.cActiveMappings++;
8780
8781 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8782 *ppvMem = pbBuf;
8783 return VINF_SUCCESS;
8784}
8785
8786
8787
8788/**
8789 * Maps the specified guest memory for the given kind of access.
8790 *
8791 * This may be using bounce buffering of the memory if it's crossing a page
8792 * boundary or if there is an access handler installed for any of it. Because
8793 * of lock prefix guarantees, we're in for some extra clutter when this
8794 * happens.
8795 *
8796 * This may raise a \#GP, \#SS, \#PF or \#AC.
8797 *
8798 * @returns VBox strict status code.
8799 *
8800 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8801 * @param ppvMem Where to return the pointer to the mapped
8802 * memory.
8803 * @param cbMem The number of bytes to map. This is usually 1,
8804 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8805 * string operations it can be up to a page.
8806 * @param iSegReg The index of the segment register to use for
8807 * this access. The base and limits are checked.
8808 * Use UINT8_MAX to indicate that no segmentation
8809 * is required (for IDT, GDT and LDT accesses).
8810 * @param GCPtrMem The address of the guest memory.
8811 * @param fAccess How the memory is being accessed. The
8812 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8813 * how to map the memory, while the
8814 * IEM_ACCESS_WHAT_XXX bit is used when raising
8815 * exceptions.
8816 */
8817IEM_STATIC VBOXSTRICTRC
8818iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8819{
8820 /*
8821 * Check the input and figure out which mapping entry to use.
8822 */
8823 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8824 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8825 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8826
8827 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8828 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8829 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8830 {
8831 iMemMap = iemMemMapFindFree(pVCpu);
8832 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8833 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8834 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8835 pVCpu->iem.s.aMemMappings[2].fAccess),
8836 VERR_IEM_IPE_9);
8837 }
8838
8839 /*
8840 * Map the memory, checking that we can actually access it. If something
8841 * slightly complicated happens, fall back on bounce buffering.
8842 */
8843 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8844 if (rcStrict != VINF_SUCCESS)
8845 return rcStrict;
8846
8847 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8848 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8849
8850 RTGCPHYS GCPhysFirst;
8851 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8852 if (rcStrict != VINF_SUCCESS)
8853 return rcStrict;
8854
8855 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8856 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8857 if (fAccess & IEM_ACCESS_TYPE_READ)
8858 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8859
8860 void *pvMem;
8861 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8862 if (rcStrict != VINF_SUCCESS)
8863 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8864
8865 /*
8866 * Fill in the mapping table entry.
8867 */
8868 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8869 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8870 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8871 pVCpu->iem.s.cActiveMappings++;
8872
8873 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8874 *ppvMem = pvMem;
8875 return VINF_SUCCESS;
8876}
8877
8878
8879/**
8880 * Commits the guest memory if bounce buffered and unmaps it.
8881 *
8882 * @returns Strict VBox status code.
8883 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8884 * @param pvMem The mapping.
8885 * @param fAccess The kind of access.
8886 */
8887IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8888{
8889 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8890 AssertReturn(iMemMap >= 0, iMemMap);
8891
8892 /* If it's bounce buffered, we may need to write back the buffer. */
8893 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8894 {
8895 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8896 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8897 }
8898 /* Otherwise unlock it. */
8899 else
8900 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8901
8902 /* Free the entry. */
8903 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8904 Assert(pVCpu->iem.s.cActiveMappings != 0);
8905 pVCpu->iem.s.cActiveMappings--;
8906 return VINF_SUCCESS;
8907}
8908
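/*
 * iemMemMap and iemMemCommitAndUnmap are used as a pair: map, access the
 * returned pointer (which may be a bounce buffer), then commit and unmap.
 * The data fetch helpers further down in this file follow exactly this
 * pattern for reads; the sketch below shows the corresponding write shape.
 * It is illustrative only - the function name is made up, while
 * IEM_ACCESS_DATA_W and the status handling follow the conventions used in
 * this file:
 *
 * @code
 *   IEM_STATIC VBOXSTRICTRC
 *   iemSketchStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
 *   {
 *       uint32_t *pu32Dst;
 *       VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                   iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *       if (rc == VINF_SUCCESS)
 *       {
 *           *pu32Dst = u32Value;                                          // write into the mapping
 *           rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W); // flushes bounce buffers
 *       }
 *       return rc;
 *   }
 * @endcode
 */
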
8909#ifdef IEM_WITH_SETJMP
8910
8911/**
8912 * Maps the specified guest memory for the given kind of access, longjmp on
8913 * error.
8914 *
8915 * This may be using bounce buffering of the memory if it's crossing a page
8916 * boundary or if there is an access handler installed for any of it. Because
8917 * of lock prefix guarantees, we're in for some extra clutter when this
8918 * happens.
8919 *
8920 * This may raise a \#GP, \#SS, \#PF or \#AC.
8921 *
8922 * @returns Pointer to the mapped memory.
8923 *
8924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8925 * @param cbMem The number of bytes to map. This is usually 1,
8926 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8927 * string operations it can be up to a page.
8928 * @param iSegReg The index of the segment register to use for
8929 * this access. The base and limits are checked.
8930 * Use UINT8_MAX to indicate that no segmentation
8931 * is required (for IDT, GDT and LDT accesses).
8932 * @param GCPtrMem The address of the guest memory.
8933 * @param fAccess How the memory is being accessed. The
8934 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8935 * how to map the memory, while the
8936 * IEM_ACCESS_WHAT_XXX bit is used when raising
8937 * exceptions.
8938 */
8939IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8940{
8941 /*
8942 * Check the input and figure out which mapping entry to use.
8943 */
8944 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8945 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8946 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8947
8948 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8949 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8950 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8951 {
8952 iMemMap = iemMemMapFindFree(pVCpu);
8953 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8954 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8955 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8956 pVCpu->iem.s.aMemMappings[2].fAccess),
8957 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8958 }
8959
8960 /*
8961 * Map the memory, checking that we can actually access it. If something
8962 * slightly complicated happens, fall back on bounce buffering.
8963 */
8964 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8965 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8966 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8967
8968 /* Crossing a page boundary? */
8969 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8970 { /* No (likely). */ }
8971 else
8972 {
8973 void *pvMem;
8974 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8975 if (rcStrict == VINF_SUCCESS)
8976 return pvMem;
8977 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8978 }
8979
8980 RTGCPHYS GCPhysFirst;
8981 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8982 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8983 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8984
8985 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8986 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8987 if (fAccess & IEM_ACCESS_TYPE_READ)
8988 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8989
8990 void *pvMem;
8991 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8992 if (rcStrict == VINF_SUCCESS)
8993 { /* likely */ }
8994 else
8995 {
8996 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8997 if (rcStrict == VINF_SUCCESS)
8998 return pvMem;
8999 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9000 }
9001
9002 /*
9003 * Fill in the mapping table entry.
9004 */
9005 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9006 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9007 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9008 pVCpu->iem.s.cActiveMappings++;
9009
9010 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9011 return pvMem;
9012}
9013
9014
9015/**
9016 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9017 *
9018 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9019 * @param pvMem The mapping.
9020 * @param fAccess The kind of access.
9021 */
9022IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9023{
9024 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9025 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9026
9027 /* If it's bounce buffered, we may need to write back the buffer. */
9028 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9029 {
9030 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9031 {
9032 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9033 if (rcStrict == VINF_SUCCESS)
9034 return;
9035 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9036 }
9037 }
9038 /* Otherwise unlock it. */
9039 else
9040 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9041
9042 /* Free the entry. */
9043 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9044 Assert(pVCpu->iem.s.cActiveMappings != 0);
9045 pVCpu->iem.s.cActiveMappings--;
9046}
9047
9048#endif /* IEM_WITH_SETJMP */
9049
9050#ifndef IN_RING3
9051/**
9052 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9053 * buffer part runs into trouble, the write is postponed to ring-3 (sets FF and stuff).
9054 *
9055 * Allows the instruction to be completed and retired, while the IEM user will
9056 * return to ring-3 immediately afterwards and do the postponed writes there.
9057 *
9058 * @returns VBox status code (no strict statuses). Caller must check
9059 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9060 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9061 * @param pvMem The mapping.
9062 * @param fAccess The kind of access.
9063 */
9064IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9065{
9066 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9067 AssertReturn(iMemMap >= 0, iMemMap);
9068
9069 /* If it's bounce buffered, we may need to write back the buffer. */
9070 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9071 {
9072 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9073 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9074 }
9075 /* Otherwise unlock it. */
9076 else
9077 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9078
9079 /* Free the entry. */
9080 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9081 Assert(pVCpu->iem.s.cActiveMappings != 0);
9082 pVCpu->iem.s.cActiveMappings--;
9083 return VINF_SUCCESS;
9084}
9085#endif
9086
9087
9088/**
9089 * Rolls back mappings, releasing page locks and such.
9090 *
9091 * The caller shall only call this after checking cActiveMappings.
9092 *
9094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9095 */
9096IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9097{
9098 Assert(pVCpu->iem.s.cActiveMappings > 0);
9099
9100 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9101 while (iMemMap-- > 0)
9102 {
9103 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9104 if (fAccess != IEM_ACCESS_INVALID)
9105 {
9106 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9107 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9108 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9109 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9110 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9111 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9112 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9113 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9114 pVCpu->iem.s.cActiveMappings--;
9115 }
9116 }
9117}
9118
9119
9120/**
9121 * Fetches a data byte.
9122 *
9123 * @returns Strict VBox status code.
9124 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9125 * @param pu8Dst Where to return the byte.
9126 * @param iSegReg The index of the segment register to use for
9127 * this access. The base and limits are checked.
9128 * @param GCPtrMem The address of the guest memory.
9129 */
9130IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9131{
9132 /* The lazy approach for now... */
9133 uint8_t const *pu8Src;
9134 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9135 if (rc == VINF_SUCCESS)
9136 {
9137 *pu8Dst = *pu8Src;
9138 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9139 }
9140 return rc;
9141}
9142
9143
9144#ifdef IEM_WITH_SETJMP
9145/**
9146 * Fetches a data byte, longjmp on error.
9147 *
9148 * @returns The byte.
9149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9150 * @param iSegReg The index of the segment register to use for
9151 * this access. The base and limits are checked.
9152 * @param GCPtrMem The address of the guest memory.
9153 */
9154DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9155{
9156 /* The lazy approach for now... */
9157 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9158 uint8_t const bRet = *pu8Src;
9159 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9160 return bRet;
9161}
9162#endif /* IEM_WITH_SETJMP */
9163
9164
9165/**
9166 * Fetches a data word.
9167 *
9168 * @returns Strict VBox status code.
9169 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9170 * @param pu16Dst Where to return the word.
9171 * @param iSegReg The index of the segment register to use for
9172 * this access. The base and limits are checked.
9173 * @param GCPtrMem The address of the guest memory.
9174 */
9175IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9176{
9177 /* The lazy approach for now... */
9178 uint16_t const *pu16Src;
9179 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9180 if (rc == VINF_SUCCESS)
9181 {
9182 *pu16Dst = *pu16Src;
9183 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9184 }
9185 return rc;
9186}
9187
9188
9189#ifdef IEM_WITH_SETJMP
9190/**
9191 * Fetches a data word, longjmp on error.
9192 *
9193 * @returns The word.
9194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9195 * @param iSegReg The index of the segment register to use for
9196 * this access. The base and limits are checked.
9197 * @param GCPtrMem The address of the guest memory.
9198 */
9199DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9200{
9201 /* The lazy approach for now... */
9202 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9203 uint16_t const u16Ret = *pu16Src;
9204 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9205 return u16Ret;
9206}
9207#endif
9208
9209
9210/**
9211 * Fetches a data dword.
9212 *
9213 * @returns Strict VBox status code.
9214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9215 * @param pu32Dst Where to return the dword.
9216 * @param iSegReg The index of the segment register to use for
9217 * this access. The base and limits are checked.
9218 * @param GCPtrMem The address of the guest memory.
9219 */
9220IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9221{
9222 /* The lazy approach for now... */
9223 uint32_t const *pu32Src;
9224 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9225 if (rc == VINF_SUCCESS)
9226 {
9227 *pu32Dst = *pu32Src;
9228 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9229 }
9230 return rc;
9231}
9232
9233
9234#ifdef IEM_WITH_SETJMP
9235
9236IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9237{
9238 Assert(cbMem >= 1);
9239 Assert(iSegReg < X86_SREG_COUNT);
9240
9241 /*
9242 * 64-bit mode is simpler.
9243 */
9244 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9245 {
9246 if (iSegReg >= X86_SREG_FS)
9247 {
9248 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9249 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9250 GCPtrMem += pSel->u64Base;
9251 }
9252
9253 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9254 return GCPtrMem;
9255 }
9256 /*
9257 * 16-bit and 32-bit segmentation.
9258 */
9259 else
9260 {
9261 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9262 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9263 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9264 == X86DESCATTR_P /* data, expand up */
9265 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9266 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9267 {
9268 /* expand up */
9269 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9270 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9271 && GCPtrLast32 > (uint32_t)GCPtrMem))
9272 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9273 }
9274 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9275 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9276 {
9277 /* expand down */
9278 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9279 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9280 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9281 && GCPtrLast32 > (uint32_t)GCPtrMem))
9282 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9283 }
9284 else
9285 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9286 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9287 }
9288 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9289}
9290
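/*
 * Illustrative sketch, not part of the original source: the architectural
 * limit rule that iemMemApplySegmentToReadJmp above enforces for 16-bit and
 * 32-bit data segments (boundary handling simplified; the standalone helper
 * name is hypothetical and only meant for exposition).
 */
#if 0 /* example only */
static bool iemExampleIsOffsetWithinDataSegLimit(PCPUMSELREGHID pSel, uint32_t offFirst, uint32_t cbAcc)
{
    uint32_t const offLast = offFirst + cbAcc - 1;
    if (offLast < offFirst)                     /* 32-bit wrap-around is always out of bounds */
        return false;
    if (!(pSel->Attr.u & X86_SEL_TYPE_DOWN))    /* expand-up: offsets 0 .. limit are valid */
        return offLast <= pSel->u32Limit;
    /* expand-down: offsets limit+1 .. 0xffff (or 0xffffffff when D=1) are valid */
    return offFirst > pSel->u32Limit
        && offLast  <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff));
}
#endif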
9291
9292IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9293{
9294 Assert(cbMem >= 1);
9295 Assert(iSegReg < X86_SREG_COUNT);
9296
9297 /*
9298 * 64-bit mode is simpler.
9299 */
9300 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9301 {
9302 if (iSegReg >= X86_SREG_FS)
9303 {
9304 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9305 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9306 GCPtrMem += pSel->u64Base;
9307 }
9308
9309 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9310 return GCPtrMem;
9311 }
9312 /*
9313 * 16-bit and 32-bit segmentation.
9314 */
9315 else
9316 {
9317 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9318 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9319 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9320 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9321 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9322 {
9323 /* expand up */
9324 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9325 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9326 && GCPtrLast32 > (uint32_t)GCPtrMem))
9327 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9328 }
9329 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9330 {
9331 /* expand down */
9332 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9333 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9334 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9335 && GCPtrLast32 > (uint32_t)GCPtrMem))
9336 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9337 }
9338 else
9339 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9340 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9341 }
9342 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9343}
9344
9345
9346/**
9347 * Fetches a data dword, longjmp on error, fallback/safe version.
9348 *
9349 * @returns The dword
9350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9351 * @param iSegReg The index of the segment register to use for
9352 * this access. The base and limits are checked.
9353 * @param GCPtrMem The address of the guest memory.
9354 */
9355IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9356{
9357 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9358 uint32_t const u32Ret = *pu32Src;
9359 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9360 return u32Ret;
9361}
9362
9363
9364/**
9365 * Fetches a data dword, longjmp on error.
9366 *
9367 * @returns The dword
9368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9369 * @param iSegReg The index of the segment register to use for
9370 * this access. The base and limits are checked.
9371 * @param GCPtrMem The address of the guest memory.
9372 */
9373DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9374{
9375# ifdef IEM_WITH_DATA_TLB
9376 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9377 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9378 {
9379 /// @todo more later.
9380 }
9381
9382 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9383# else
9384 /* The lazy approach. */
9385 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9386 uint32_t const u32Ret = *pu32Src;
9387 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9388 return u32Ret;
9389# endif
9390}
9391#endif
9392
9393
9394#ifdef SOME_UNUSED_FUNCTION
9395/**
9396 * Fetches a data dword and sign extends it to a qword.
9397 *
9398 * @returns Strict VBox status code.
9399 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9400 * @param pu64Dst Where to return the sign extended value.
9401 * @param iSegReg The index of the segment register to use for
9402 * this access. The base and limits are checked.
9403 * @param GCPtrMem The address of the guest memory.
9404 */
9405IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9406{
9407 /* The lazy approach for now... */
9408 int32_t const *pi32Src;
9409 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9410 if (rc == VINF_SUCCESS)
9411 {
9412 *pu64Dst = *pi32Src;
9413 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9414 }
9415#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9416 else
9417 *pu64Dst = 0;
9418#endif
9419 return rc;
9420}
9421#endif
9422
9423
9424/**
9425 * Fetches a data qword.
9426 *
9427 * @returns Strict VBox status code.
9428 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9429 * @param pu64Dst Where to return the qword.
9430 * @param iSegReg The index of the segment register to use for
9431 * this access. The base and limits are checked.
9432 * @param GCPtrMem The address of the guest memory.
9433 */
9434IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9435{
9436 /* The lazy approach for now... */
9437 uint64_t const *pu64Src;
9438 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9439 if (rc == VINF_SUCCESS)
9440 {
9441 *pu64Dst = *pu64Src;
9442 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9443 }
9444 return rc;
9445}
9446
9447
9448#ifdef IEM_WITH_SETJMP
9449/**
9450 * Fetches a data qword, longjmp on error.
9451 *
9452 * @returns The qword.
9453 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9454 * @param iSegReg The index of the segment register to use for
9455 * this access. The base and limits are checked.
9456 * @param GCPtrMem The address of the guest memory.
9457 */
9458DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9459{
9460 /* The lazy approach for now... */
9461 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9462 uint64_t const u64Ret = *pu64Src;
9463 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9464 return u64Ret;
9465}
9466#endif
9467
9468
9469/**
9470 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9471 *
9472 * @returns Strict VBox status code.
9473 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9474 * @param pu64Dst Where to return the qword.
9475 * @param iSegReg The index of the segment register to use for
9476 * this access. The base and limits are checked.
9477 * @param GCPtrMem The address of the guest memory.
9478 */
9479IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9480{
9481 /* The lazy approach for now... */
9482 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9483 if (RT_UNLIKELY(GCPtrMem & 15))
9484 return iemRaiseGeneralProtectionFault0(pVCpu);
9485
9486 uint64_t const *pu64Src;
9487 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9488 if (rc == VINF_SUCCESS)
9489 {
9490 *pu64Dst = *pu64Src;
9491 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9492 }
9493 return rc;
9494}
9495
9496
9497#ifdef IEM_WITH_SETJMP
9498/**
9499 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9500 *
9501 * @returns The qword.
9502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9503 * @param iSegReg The index of the segment register to use for
9504 * this access. The base and limits are checked.
9505 * @param GCPtrMem The address of the guest memory.
9506 */
9507DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9508{
9509 /* The lazy approach for now... */
9510 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9511 if (RT_LIKELY(!(GCPtrMem & 15)))
9512 {
9513 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9514 uint64_t const u64Ret = *pu64Src;
9515 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9516 return u64Ret;
9517 }
9518
9519 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9520 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9521}
9522#endif
9523
9524
9525/**
9526 * Fetches a data tword.
9527 *
9528 * @returns Strict VBox status code.
9529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9530 * @param pr80Dst Where to return the tword.
9531 * @param iSegReg The index of the segment register to use for
9532 * this access. The base and limits are checked.
9533 * @param GCPtrMem The address of the guest memory.
9534 */
9535IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9536{
9537 /* The lazy approach for now... */
9538 PCRTFLOAT80U pr80Src;
9539 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9540 if (rc == VINF_SUCCESS)
9541 {
9542 *pr80Dst = *pr80Src;
9543 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9544 }
9545 return rc;
9546}
9547
9548
9549#ifdef IEM_WITH_SETJMP
9550/**
9551 * Fetches a data tword, longjmp on error.
9552 *
9553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9554 * @param pr80Dst Where to return the tword.
9555 * @param iSegReg The index of the segment register to use for
9556 * this access. The base and limits are checked.
9557 * @param GCPtrMem The address of the guest memory.
9558 */
9559DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9560{
9561 /* The lazy approach for now... */
9562 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9563 *pr80Dst = *pr80Src;
9564 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9565}
9566#endif
9567
9568
9569/**
9570 * Fetches a data dqword (double qword), generally SSE related.
9571 *
9572 * @returns Strict VBox status code.
9573 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9574 * @param pu128Dst Where to return the dqword.
9575 * @param iSegReg The index of the segment register to use for
9576 * this access. The base and limits are checked.
9577 * @param GCPtrMem The address of the guest memory.
9578 */
9579IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9580{
9581 /* The lazy approach for now... */
9582 PCRTUINT128U pu128Src;
9583 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9584 if (rc == VINF_SUCCESS)
9585 {
9586 pu128Dst->au64[0] = pu128Src->au64[0];
9587 pu128Dst->au64[1] = pu128Src->au64[1];
9588 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9589 }
9590 return rc;
9591}
9592
9593
9594#ifdef IEM_WITH_SETJMP
9595/**
9596 * Fetches a data dqword (double qword), generally SSE related.
9597 *
9598 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9599 * @param pu128Dst Where to return the dqword.
9600 * @param iSegReg The index of the segment register to use for
9601 * this access. The base and limits are checked.
9602 * @param GCPtrMem The address of the guest memory.
9603 */
9604IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9605{
9606 /* The lazy approach for now... */
9607 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9608 pu128Dst->au64[0] = pu128Src->au64[0];
9609 pu128Dst->au64[1] = pu128Src->au64[1];
9610 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9611}
9612#endif
9613
9614
9615/**
9616 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9617 * related.
9618 *
9619 * Raises \#GP(0) if not aligned.
9620 *
9621 * @returns Strict VBox status code.
9622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9623 * @param pu128Dst Where to return the dqword.
9624 * @param iSegReg The index of the segment register to use for
9625 * this access. The base and limits are checked.
9626 * @param GCPtrMem The address of the guest memory.
9627 */
9628IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9629{
9630 /* The lazy approach for now... */
9631 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9632 if ( (GCPtrMem & 15)
9633 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9634 return iemRaiseGeneralProtectionFault0(pVCpu);
9635
9636 PCRTUINT128U pu128Src;
9637 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9638 if (rc == VINF_SUCCESS)
9639 {
9640 pu128Dst->au64[0] = pu128Src->au64[0];
9641 pu128Dst->au64[1] = pu128Src->au64[1];
9642 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9643 }
9644 return rc;
9645}
9646
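/*
 * Illustrative sketch, not part of the original source: the alignment
 * predicate the SSE-aligned fetch/store helpers in this file use.  A
 * misaligned 16-byte access is tolerated when MXCSR.MM (AMD misaligned SSE
 * mode) is set; the standalone helper name is hypothetical.
 */
#if 0 /* example only */
static bool iemExampleIsSseAccessAligned(PVMCPU pVCpu, RTGCPTR GCPtrMem)
{
    return (GCPtrMem & 15) == 0
        || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM);
}
#endif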
9647
9648#ifdef IEM_WITH_SETJMP
9649/**
9650 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9651 * related, longjmp on error.
9652 *
9653 * Raises \#GP(0) if not aligned.
9654 *
9655 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9656 * @param pu128Dst Where to return the dqword.
9657 * @param iSegReg The index of the segment register to use for
9658 * this access. The base and limits are checked.
9659 * @param GCPtrMem The address of the guest memory.
9660 */
9661DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9662{
9663 /* The lazy approach for now... */
9664 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9665 if ( (GCPtrMem & 15) == 0
9666 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9667 {
9668 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9669 pu128Dst->au64[0] = pu128Src->au64[0];
9670 pu128Dst->au64[1] = pu128Src->au64[1];
9671 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9672 return;
9673 }
9674
9675 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9676 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9677}
9678#endif
9679
9680
9681/**
9682 * Fetches a data oword (octo word), generally AVX related.
9683 *
9684 * @returns Strict VBox status code.
9685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9686 * @param pu256Dst Where to return the oword.
9687 * @param iSegReg The index of the segment register to use for
9688 * this access. The base and limits are checked.
9689 * @param GCPtrMem The address of the guest memory.
9690 */
9691IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9692{
9693 /* The lazy approach for now... */
9694 PCRTUINT256U pu256Src;
9695 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9696 if (rc == VINF_SUCCESS)
9697 {
9698 pu256Dst->au64[0] = pu256Src->au64[0];
9699 pu256Dst->au64[1] = pu256Src->au64[1];
9700 pu256Dst->au64[2] = pu256Src->au64[2];
9701 pu256Dst->au64[3] = pu256Src->au64[3];
9702 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9703 }
9704 return rc;
9705}
9706
9707
9708#ifdef IEM_WITH_SETJMP
9709/**
9710 * Fetches a data oword (octo word), generally AVX related.
9711 *
9712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9713 * @param pu256Dst Where to return the oword.
9714 * @param iSegReg The index of the segment register to use for
9715 * this access. The base and limits are checked.
9716 * @param GCPtrMem The address of the guest memory.
9717 */
9718IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9719{
9720 /* The lazy approach for now... */
9721 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9722 pu256Dst->au64[0] = pu256Src->au64[0];
9723 pu256Dst->au64[1] = pu256Src->au64[1];
9724 pu256Dst->au64[2] = pu256Src->au64[2];
9725 pu256Dst->au64[3] = pu256Src->au64[3];
9726 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9727}
9728#endif
9729
9730
9731/**
9732 * Fetches a data oword (octo word) at an aligned address, generally AVX
9733 * related.
9734 *
9735 * Raises \#GP(0) if not aligned.
9736 *
9737 * @returns Strict VBox status code.
9738 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9739 * @param pu256Dst Where to return the oword.
9740 * @param iSegReg The index of the segment register to use for
9741 * this access. The base and limits are checked.
9742 * @param GCPtrMem The address of the guest memory.
9743 */
9744IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9745{
9746 /* The lazy approach for now... */
9747 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9748 if (GCPtrMem & 31)
9749 return iemRaiseGeneralProtectionFault0(pVCpu);
9750
9751 PCRTUINT256U pu256Src;
9752 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9753 if (rc == VINF_SUCCESS)
9754 {
9755 pu256Dst->au64[0] = pu256Src->au64[0];
9756 pu256Dst->au64[1] = pu256Src->au64[1];
9757 pu256Dst->au64[2] = pu256Src->au64[2];
9758 pu256Dst->au64[3] = pu256Src->au64[3];
9759 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9760 }
9761 return rc;
9762}
9763
9764
9765#ifdef IEM_WITH_SETJMP
9766/**
9767 * Fetches a data oword (octo word) at an aligned address, generally AVX
9768 * related, longjmp on error.
9769 *
9770 * Raises \#GP(0) if not aligned.
9771 *
9772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9773 * @param pu256Dst Where to return the oword.
9774 * @param iSegReg The index of the segment register to use for
9775 * this access. The base and limits are checked.
9776 * @param GCPtrMem The address of the guest memory.
9777 */
9778DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9779{
9780 /* The lazy approach for now... */
9781 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9782 if ((GCPtrMem & 31) == 0)
9783 {
9784 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9785 pu256Dst->au64[0] = pu256Src->au64[0];
9786 pu256Dst->au64[1] = pu256Src->au64[1];
9787 pu256Dst->au64[2] = pu256Src->au64[2];
9788 pu256Dst->au64[3] = pu256Src->au64[3];
9789 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9790 return;
9791 }
9792
9793 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9794 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9795}
9796#endif
9797
9798
9799
9800/**
9801 * Fetches a descriptor register (lgdt, lidt).
9802 *
9803 * @returns Strict VBox status code.
9804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9805 * @param pcbLimit Where to return the limit.
9806 * @param pGCPtrBase Where to return the base.
9807 * @param iSegReg The index of the segment register to use for
9808 * this access. The base and limits are checked.
9809 * @param GCPtrMem The address of the guest memory.
9810 * @param enmOpSize The effective operand size.
9811 */
9812IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9813 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9814{
9815 /*
9816 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9817 * little special:
9818 * - The two reads are done separately.
9819 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9820 * - We suspect the 386 to actually commit the limit before the base in
9821 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9822 * don't try to emulate this eccentric behavior, because it's not well
9823 * enough understood and rather hard to trigger.
9824 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9825 */
9826 VBOXSTRICTRC rcStrict;
9827 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9828 {
9829 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9830 if (rcStrict == VINF_SUCCESS)
9831 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9832 }
9833 else
9834 {
9835 uint32_t uTmp = 0; /* (Silences a Visual C++ 'maybe used uninitialized' warning.) */
9836 if (enmOpSize == IEMMODE_32BIT)
9837 {
9838 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9839 {
9840 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9841 if (rcStrict == VINF_SUCCESS)
9842 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9843 }
9844 else
9845 {
9846 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9847 if (rcStrict == VINF_SUCCESS)
9848 {
9849 *pcbLimit = (uint16_t)uTmp;
9850 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9851 }
9852 }
9853 if (rcStrict == VINF_SUCCESS)
9854 *pGCPtrBase = uTmp;
9855 }
9856 else
9857 {
9858 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9859 if (rcStrict == VINF_SUCCESS)
9860 {
9861 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9862 if (rcStrict == VINF_SUCCESS)
9863 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9864 }
9865 }
9866 }
9867 return rcStrict;
9868}
9869
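/*
 * Illustrative sketch, not part of the original source: how an LGDT-style
 * caller might use iemMemFetchDataXdtr above.  The helper name, the decoded
 * operands and the CPUMSetGuestGDTR call are assumptions for exposition; the
 * real instruction implementation lives elsewhere and does more checking.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExampleLoadGdtr(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEffSrc, IEMMODE enmEffOpSize)
{
    uint16_t cbLimit;
    RTGCPTR  GCPtrBase;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmEffOpSize);
    if (rcStrict == VINF_SUCCESS)
        CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit); /* commit only after both memory reads succeeded */
    return rcStrict;
}
#endif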
9870
9871
9872/**
9873 * Stores a data byte.
9874 *
9875 * @returns Strict VBox status code.
9876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9877 * @param iSegReg The index of the segment register to use for
9878 * this access. The base and limits are checked.
9879 * @param GCPtrMem The address of the guest memory.
9880 * @param u8Value The value to store.
9881 */
9882IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9883{
9884 /* The lazy approach for now... */
9885 uint8_t *pu8Dst;
9886 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9887 if (rc == VINF_SUCCESS)
9888 {
9889 *pu8Dst = u8Value;
9890 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9891 }
9892 return rc;
9893}
9894
9895
9896#ifdef IEM_WITH_SETJMP
9897/**
9898 * Stores a data byte, longjmp on error.
9899 *
9900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9901 * @param iSegReg The index of the segment register to use for
9902 * this access. The base and limits are checked.
9903 * @param GCPtrMem The address of the guest memory.
9904 * @param u8Value The value to store.
9905 */
9906IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9907{
9908 /* The lazy approach for now... */
9909 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9910 *pu8Dst = u8Value;
9911 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9912}
9913#endif
9914
9915
9916/**
9917 * Stores a data word.
9918 *
9919 * @returns Strict VBox status code.
9920 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9921 * @param iSegReg The index of the segment register to use for
9922 * this access. The base and limits are checked.
9923 * @param GCPtrMem The address of the guest memory.
9924 * @param u16Value The value to store.
9925 */
9926IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9927{
9928 /* The lazy approach for now... */
9929 uint16_t *pu16Dst;
9930 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9931 if (rc == VINF_SUCCESS)
9932 {
9933 *pu16Dst = u16Value;
9934 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9935 }
9936 return rc;
9937}
9938
9939
9940#ifdef IEM_WITH_SETJMP
9941/**
9942 * Stores a data word, longjmp on error.
9943 *
9944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9945 * @param iSegReg The index of the segment register to use for
9946 * this access. The base and limits are checked.
9947 * @param GCPtrMem The address of the guest memory.
9948 * @param u16Value The value to store.
9949 */
9950IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9951{
9952 /* The lazy approach for now... */
9953 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9954 *pu16Dst = u16Value;
9955 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9956}
9957#endif
9958
9959
9960/**
9961 * Stores a data dword.
9962 *
9963 * @returns Strict VBox status code.
9964 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9965 * @param iSegReg The index of the segment register to use for
9966 * this access. The base and limits are checked.
9967 * @param GCPtrMem The address of the guest memory.
9968 * @param u32Value The value to store.
9969 */
9970IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9971{
9972 /* The lazy approach for now... */
9973 uint32_t *pu32Dst;
9974 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9975 if (rc == VINF_SUCCESS)
9976 {
9977 *pu32Dst = u32Value;
9978 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9979 }
9980 return rc;
9981}
9982
9983
9984#ifdef IEM_WITH_SETJMP
9985/**
9986 * Stores a data dword, longjmp on error.
9987 *
9989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9990 * @param iSegReg The index of the segment register to use for
9991 * this access. The base and limits are checked.
9992 * @param GCPtrMem The address of the guest memory.
9993 * @param u32Value The value to store.
9994 */
9995IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9996{
9997 /* The lazy approach for now... */
9998 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9999 *pu32Dst = u32Value;
10000 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10001}
10002#endif
10003
10004
10005/**
10006 * Stores a data qword.
10007 *
10008 * @returns Strict VBox status code.
10009 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10010 * @param iSegReg The index of the segment register to use for
10011 * this access. The base and limits are checked.
10012 * @param GCPtrMem The address of the guest memory.
10013 * @param u64Value The value to store.
10014 */
10015IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10016{
10017 /* The lazy approach for now... */
10018 uint64_t *pu64Dst;
10019 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10020 if (rc == VINF_SUCCESS)
10021 {
10022 *pu64Dst = u64Value;
10023 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10024 }
10025 return rc;
10026}
10027
10028
10029#ifdef IEM_WITH_SETJMP
10030/**
10031 * Stores a data qword, longjmp on error.
10032 *
10033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10034 * @param iSegReg The index of the segment register to use for
10035 * this access. The base and limits are checked.
10036 * @param GCPtrMem The address of the guest memory.
10037 * @param u64Value The value to store.
10038 */
10039IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10040{
10041 /* The lazy approach for now... */
10042 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10043 *pu64Dst = u64Value;
10044 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10045}
10046#endif
10047
10048
10049/**
10050 * Stores a data dqword.
10051 *
10052 * @returns Strict VBox status code.
10053 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10054 * @param iSegReg The index of the segment register to use for
10055 * this access. The base and limits are checked.
10056 * @param GCPtrMem The address of the guest memory.
10057 * @param u128Value The value to store.
10058 */
10059IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10060{
10061 /* The lazy approach for now... */
10062 PRTUINT128U pu128Dst;
10063 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10064 if (rc == VINF_SUCCESS)
10065 {
10066 pu128Dst->au64[0] = u128Value.au64[0];
10067 pu128Dst->au64[1] = u128Value.au64[1];
10068 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10069 }
10070 return rc;
10071}
10072
10073
10074#ifdef IEM_WITH_SETJMP
10075/**
10076 * Stores a data dqword, longjmp on error.
10077 *
10078 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10079 * @param iSegReg The index of the segment register to use for
10080 * this access. The base and limits are checked.
10081 * @param GCPtrMem The address of the guest memory.
10082 * @param u128Value The value to store.
10083 */
10084IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10085{
10086 /* The lazy approach for now... */
10087 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10088 pu128Dst->au64[0] = u128Value.au64[0];
10089 pu128Dst->au64[1] = u128Value.au64[1];
10090 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10091}
10092#endif
10093
10094
10095/**
10096 * Stores a data dqword, SSE aligned.
10097 *
10098 * @returns Strict VBox status code.
10099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10100 * @param iSegReg The index of the segment register to use for
10101 * this access. The base and limits are checked.
10102 * @param GCPtrMem The address of the guest memory.
10103 * @param u128Value The value to store.
10104 */
10105IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10106{
10107 /* The lazy approach for now... */
10108 if ( (GCPtrMem & 15)
10109 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10110 return iemRaiseGeneralProtectionFault0(pVCpu);
10111
10112 PRTUINT128U pu128Dst;
10113 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10114 if (rc == VINF_SUCCESS)
10115 {
10116 pu128Dst->au64[0] = u128Value.au64[0];
10117 pu128Dst->au64[1] = u128Value.au64[1];
10118 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10119 }
10120 return rc;
10121}
10122
10123
10124#ifdef IEM_WITH_SETJMP
10125/**
10126 * Stores a data dqword, SSE aligned, longjmp on error.
10127 *
10129 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10130 * @param iSegReg The index of the segment register to use for
10131 * this access. The base and limits are checked.
10132 * @param GCPtrMem The address of the guest memory.
10133 * @param u128Value The value to store.
10134 */
10135DECL_NO_INLINE(IEM_STATIC, void)
10136iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10137{
10138 /* The lazy approach for now... */
10139 if ( (GCPtrMem & 15) == 0
10140 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10141 {
10142 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10143 pu128Dst->au64[0] = u128Value.au64[0];
10144 pu128Dst->au64[1] = u128Value.au64[1];
10145 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10146 return;
10147 }
10148
10149 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10150 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10151}
10152#endif
10153
10154
10155/**
10156 * Stores a data oword (octo word).
10157 *
10158 * @returns Strict VBox status code.
10159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10160 * @param iSegReg The index of the segment register to use for
10161 * this access. The base and limits are checked.
10162 * @param GCPtrMem The address of the guest memory.
10163 * @param pu256Value Pointer to the value to store.
10164 */
10165IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10166{
10167 /* The lazy approach for now... */
10168 PRTUINT256U pu256Dst;
10169 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10170 if (rc == VINF_SUCCESS)
10171 {
10172 pu256Dst->au64[0] = pu256Value->au64[0];
10173 pu256Dst->au64[1] = pu256Value->au64[1];
10174 pu256Dst->au64[2] = pu256Value->au64[2];
10175 pu256Dst->au64[3] = pu256Value->au64[3];
10176 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10177 }
10178 return rc;
10179}
10180
10181
10182#ifdef IEM_WITH_SETJMP
10183/**
10184 * Stores a data oword (octo word), longjmp on error.
10185 *
10186 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10187 * @param iSegReg The index of the segment register to use for
10188 * this access. The base and limits are checked.
10189 * @param GCPtrMem The address of the guest memory.
10190 * @param pu256Value Pointer to the value to store.
10191 */
10192IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10193{
10194 /* The lazy approach for now... */
10195 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10196 pu256Dst->au64[0] = pu256Value->au64[0];
10197 pu256Dst->au64[1] = pu256Value->au64[1];
10198 pu256Dst->au64[2] = pu256Value->au64[2];
10199 pu256Dst->au64[3] = pu256Value->au64[3];
10200 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10201}
10202#endif
10203
10204
10205/**
10206 * Stores a data oword (octo word), AVX aligned.
10207 *
10208 * @returns Strict VBox status code.
10209 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10210 * @param iSegReg The index of the segment register to use for
10211 * this access. The base and limits are checked.
10212 * @param GCPtrMem The address of the guest memory.
10213 * @param pu256Value Pointer to the value to store.
10214 */
10215IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10216{
10217 /* The lazy approach for now... */
10218 if (GCPtrMem & 31)
10219 return iemRaiseGeneralProtectionFault0(pVCpu);
10220
10221 PRTUINT256U pu256Dst;
10222 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10223 if (rc == VINF_SUCCESS)
10224 {
10225 pu256Dst->au64[0] = pu256Value->au64[0];
10226 pu256Dst->au64[1] = pu256Value->au64[1];
10227 pu256Dst->au64[2] = pu256Value->au64[2];
10228 pu256Dst->au64[3] = pu256Value->au64[3];
10229 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10230 }
10231 return rc;
10232}
10233
10234
10235#ifdef IEM_WITH_SETJMP
10236/**
10237 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10238 *
10240 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10241 * @param iSegReg The index of the segment register to use for
10242 * this access. The base and limits are checked.
10243 * @param GCPtrMem The address of the guest memory.
10244 * @param pu256Value Pointer to the value to store.
10245 */
10246DECL_NO_INLINE(IEM_STATIC, void)
10247iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10248{
10249 /* The lazy approach for now... */
10250 if ((GCPtrMem & 31) == 0)
10251 {
10252 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10253 pu256Dst->au64[0] = pu256Value->au64[0];
10254 pu256Dst->au64[1] = pu256Value->au64[1];
10255 pu256Dst->au64[2] = pu256Value->au64[2];
10256 pu256Dst->au64[3] = pu256Value->au64[3];
10257 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10258 return;
10259 }
10260
10261 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10262 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10263}
10264#endif
10265
10266
10267/**
10268 * Stores a descriptor register (sgdt, sidt).
10269 *
10270 * @returns Strict VBox status code.
10271 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10272 * @param cbLimit The limit.
10273 * @param GCPtrBase The base address.
10274 * @param iSegReg The index of the segment register to use for
10275 * this access. The base and limits are checked.
10276 * @param GCPtrMem The address of the guest memory.
10277 */
10278IEM_STATIC VBOXSTRICTRC
10279iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10280{
10281 /*
10282 * The SIDT and SGDT instructions actually store the data using two
10283 * independent writes. The instructions do not respond to operand-size prefixes.
10284 */
10285 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10286 if (rcStrict == VINF_SUCCESS)
10287 {
10288 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10289 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10290 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10291 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10292 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10293 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10294 else
10295 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10296 }
10297 return rcStrict;
10298}
10299
10300
10301/**
10302 * Pushes a word onto the stack.
10303 *
10304 * @returns Strict VBox status code.
10305 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10306 * @param u16Value The value to push.
10307 */
10308IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10309{
10310 /* Decrement the stack pointer. */
10311 uint64_t uNewRsp;
10312 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10313
10314 /* Write the word the lazy way. */
10315 uint16_t *pu16Dst;
10316 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10317 if (rc == VINF_SUCCESS)
10318 {
10319 *pu16Dst = u16Value;
10320 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10321 }
10322
10323 /* Commit the new RSP value unless an access handler made trouble. */
10324 if (rc == VINF_SUCCESS)
10325 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10326
10327 return rc;
10328}
10329
10330
10331/**
10332 * Pushes a dword onto the stack.
10333 *
10334 * @returns Strict VBox status code.
10335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10336 * @param u32Value The value to push.
10337 */
10338IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10339{
10340 /* Decrement the stack pointer. */
10341 uint64_t uNewRsp;
10342 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10343
10344 /* Write the dword the lazy way. */
10345 uint32_t *pu32Dst;
10346 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10347 if (rc == VINF_SUCCESS)
10348 {
10349 *pu32Dst = u32Value;
10350 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10351 }
10352
10353 /* Commit the new RSP value unless an access handler made trouble. */
10354 if (rc == VINF_SUCCESS)
10355 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10356
10357 return rc;
10358}
10359
10360
10361/**
10362 * Pushes a dword segment register value onto the stack.
10363 *
10364 * @returns Strict VBox status code.
10365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10366 * @param u32Value The value to push.
10367 */
10368IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10369{
10370 /* Decrement the stack pointer. */
10371 uint64_t uNewRsp;
10372 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10373
10374 /* The Intel docs talk about zero extending the selector register
10375 value. My actual Intel CPU here might be zero extending the value,
10376 but it still only writes the lower word... */
10377 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10378 * happens when crossing a page boundary: is the high word checked
10379 * for write accessibility or not? Probably it is. What about segment limits?
10380 * It appears this behavior is also shared with trap error codes.
10381 *
10382 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
10383 * ancient hardware to find out when it actually changed. */
10384 uint16_t *pu16Dst;
10385 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10386 if (rc == VINF_SUCCESS)
10387 {
10388 *pu16Dst = (uint16_t)u32Value;
10389 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10390 }
10391
10392 /* Commit the new RSP value unless an access handler made trouble. */
10393 if (rc == VINF_SUCCESS)
10394 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10395
10396 return rc;
10397}
10398
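/*
 * Illustrative sketch, not part of the original source: why the special
 * iemMemStackPushU32SReg helper above exists.  A hypothetical PUSH Sreg
 * implementation picks the push helper by effective operand size, and only
 * the 32-bit case needs the "only the low word is written" behaviour.
 * Names outside this excerpt are assumptions for exposition.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePushSReg(PVMCPU pVCpu, uint8_t iSegReg)
{
    uint16_t const uSel = iemSRegFetchU16(pVCpu, iSegReg);
    switch (pVCpu->iem.s.enmEffOpSize)
    {
        case IEMMODE_16BIT: return iemMemStackPushU16(pVCpu, uSel);
        case IEMMODE_32BIT: return iemMemStackPushU32SReg(pVCpu, uSel);
        case IEMMODE_64BIT: return iemMemStackPushU64(pVCpu, uSel);
        default:            AssertFailedReturn(VERR_IEM_IPE_1);
    }
}
#endif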
10399
10400/**
10401 * Pushes a qword onto the stack.
10402 *
10403 * @returns Strict VBox status code.
10404 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10405 * @param u64Value The value to push.
10406 */
10407IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10408{
10409 /* Decrement the stack pointer. */
10410 uint64_t uNewRsp;
10411 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10412
10413 /* Write the qword the lazy way. */
10414 uint64_t *pu64Dst;
10415 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10416 if (rc == VINF_SUCCESS)
10417 {
10418 *pu64Dst = u64Value;
10419 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10420 }
10421
10422 /* Commit the new RSP value unless an access handler made trouble. */
10423 if (rc == VINF_SUCCESS)
10424 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10425
10426 return rc;
10427}
10428
10429
10430/**
10431 * Pops a word from the stack.
10432 *
10433 * @returns Strict VBox status code.
10434 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10435 * @param pu16Value Where to store the popped value.
10436 */
10437IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10438{
10439 /* Increment the stack pointer. */
10440 uint64_t uNewRsp;
10441 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10442
10443 /* Read the word the lazy way. */
10444 uint16_t const *pu16Src;
10445 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10446 if (rc == VINF_SUCCESS)
10447 {
10448 *pu16Value = *pu16Src;
10449 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10450
10451 /* Commit the new RSP value. */
10452 if (rc == VINF_SUCCESS)
10453 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10454 }
10455
10456 return rc;
10457}
10458
10459
10460/**
10461 * Pops a dword from the stack.
10462 *
10463 * @returns Strict VBox status code.
10464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10465 * @param pu32Value Where to store the popped value.
10466 */
10467IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10468{
10469 /* Increment the stack pointer. */
10470 uint64_t uNewRsp;
10471 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10472
10473 /* Read the dword the lazy way. */
10474 uint32_t const *pu32Src;
10475 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10476 if (rc == VINF_SUCCESS)
10477 {
10478 *pu32Value = *pu32Src;
10479 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10480
10481 /* Commit the new RSP value. */
10482 if (rc == VINF_SUCCESS)
10483 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10484 }
10485
10486 return rc;
10487}
10488
10489
10490/**
10491 * Pops a qword from the stack.
10492 *
10493 * @returns Strict VBox status code.
10494 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10495 * @param pu64Value Where to store the popped value.
10496 */
10497IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10498{
10499 /* Increment the stack pointer. */
10500 uint64_t uNewRsp;
10501 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10502
10503 /* Read the qword the lazy way. */
10504 uint64_t const *pu64Src;
10505 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10506 if (rc == VINF_SUCCESS)
10507 {
10508 *pu64Value = *pu64Src;
10509 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10510
10511 /* Commit the new RSP value. */
10512 if (rc == VINF_SUCCESS)
10513 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10514 }
10515
10516 return rc;
10517}
10518
10519
10520/**
10521 * Pushes a word onto the stack, using a temporary stack pointer.
10522 *
10523 * @returns Strict VBox status code.
10524 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10525 * @param u16Value The value to push.
10526 * @param pTmpRsp Pointer to the temporary stack pointer.
10527 */
10528IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10529{
10530 /* Decrement the stack pointer. */
10531 RTUINT64U NewRsp = *pTmpRsp;
10532 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10533
10534 /* Write the word the lazy way. */
10535 uint16_t *pu16Dst;
10536 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10537 if (rc == VINF_SUCCESS)
10538 {
10539 *pu16Dst = u16Value;
10540 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10541 }
10542
10543 /* Commit the new RSP value unless an access handler made trouble. */
10544 if (rc == VINF_SUCCESS)
10545 *pTmpRsp = NewRsp;
10546
10547 return rc;
10548}
10549
10550
10551/**
10552 * Pushes a dword onto the stack, using a temporary stack pointer.
10553 *
10554 * @returns Strict VBox status code.
10555 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10556 * @param u32Value The value to push.
10557 * @param pTmpRsp Pointer to the temporary stack pointer.
10558 */
10559IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10560{
10561 /* Decrement the stack pointer. */
10562 RTUINT64U NewRsp = *pTmpRsp;
10563 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10564
10565 /* Write the dword the lazy way. */
10566 uint32_t *pu32Dst;
10567 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10568 if (rc == VINF_SUCCESS)
10569 {
10570 *pu32Dst = u32Value;
10571 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10572 }
10573
10574 /* Commit the new RSP value unless an access handler made trouble. */
10575 if (rc == VINF_SUCCESS)
10576 *pTmpRsp = NewRsp;
10577
10578 return rc;
10579}
10580
10581
10582/**
10583 * Pushes a qword onto the stack, using a temporary stack pointer.
10584 *
10585 * @returns Strict VBox status code.
10586 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10587 * @param u64Value The value to push.
10588 * @param pTmpRsp Pointer to the temporary stack pointer.
10589 */
10590IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10591{
10592 /* Decrement the stack pointer. */
10593 RTUINT64U NewRsp = *pTmpRsp;
10594 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10595
10596 /* Write the qword the lazy way. */
10597 uint64_t *pu64Dst;
10598 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10599 if (rc == VINF_SUCCESS)
10600 {
10601 *pu64Dst = u64Value;
10602 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10603 }
10604
10605 /* Commit the new RSP value unless an access handler made trouble. */
10606 if (rc == VINF_SUCCESS)
10607 *pTmpRsp = NewRsp;
10608
10609 return rc;
10610}
10611
10612
10613/**
10614 * Pops a word from the stack, using a temporary stack pointer.
10615 *
10616 * @returns Strict VBox status code.
10617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10618 * @param pu16Value Where to store the popped value.
10619 * @param pTmpRsp Pointer to the temporary stack pointer.
10620 */
10621IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10622{
10623 /* Increment the stack pointer. */
10624 RTUINT64U NewRsp = *pTmpRsp;
10625 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10626
10627 /* Read the word the lazy way. */
10628 uint16_t const *pu16Src;
10629 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10630 if (rc == VINF_SUCCESS)
10631 {
10632 *pu16Value = *pu16Src;
10633 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10634
10635 /* Commit the new RSP value. */
10636 if (rc == VINF_SUCCESS)
10637 *pTmpRsp = NewRsp;
10638 }
10639
10640 return rc;
10641}
10642
10643
10644/**
10645 * Pops a dword from the stack, using a temporary stack pointer.
10646 *
10647 * @returns Strict VBox status code.
10648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10649 * @param pu32Value Where to store the popped value.
10650 * @param pTmpRsp Pointer to the temporary stack pointer.
10651 */
10652IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10653{
10654 /* Increment the stack pointer. */
10655 RTUINT64U NewRsp = *pTmpRsp;
10656 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10657
10658 /* Read the dword the lazy way. */
10659 uint32_t const *pu32Src;
10660 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10661 if (rc == VINF_SUCCESS)
10662 {
10663 *pu32Value = *pu32Src;
10664 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10665
10666 /* Commit the new RSP value. */
10667 if (rc == VINF_SUCCESS)
10668 *pTmpRsp = NewRsp;
10669 }
10670
10671 return rc;
10672}
10673
10674
10675/**
10676 * Pops a qword from the stack, using a temporary stack pointer.
10677 *
10678 * @returns Strict VBox status code.
10679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10680 * @param pu64Value Where to store the popped value.
10681 * @param pTmpRsp Pointer to the temporary stack pointer.
10682 */
10683IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10684{
10685 /* Increment the stack pointer. */
10686 RTUINT64U NewRsp = *pTmpRsp;
10687 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10688
10689 /* Read the qword the lazy way. */
10690 uint64_t const *pu64Src;
10691 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10692 if (rcStrict == VINF_SUCCESS)
10693 {
10694 *pu64Value = *pu64Src;
10695 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10696
10697 /* Commit the new RSP value. */
10698 if (rcStrict == VINF_SUCCESS)
10699 *pTmpRsp = NewRsp;
10700 }
10701
10702 return rcStrict;
10703}
10704
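/*
 * Illustrative sketch, not part of the original source: the *Ex stack helpers
 * above operate on a caller-owned RSP copy, so a multi-part stack operation
 * can simply be abandoned (RSP untouched) if any part fails.  The helper name
 * and the two-value frame are hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC iemExamplePushTwoValues(PVMCPU pVCpu, uint32_t uValue1, uint16_t uValue2)
{
    RTUINT64U TmpRsp;
    TmpRsp.u = pVCpu->cpum.GstCtx.rsp;              /* work on a copy of RSP */
    VBOXSTRICTRC rcStrict = iemMemStackPushU32Ex(pVCpu, uValue1, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = iemMemStackPushU16Ex(pVCpu, uValue2, &TmpRsp);
    if (rcStrict == VINF_SUCCESS)
        pVCpu->cpum.GstCtx.rsp = TmpRsp.u;          /* commit RSP only if both pushes worked */
    return rcStrict;
}
#endif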
10705
10706/**
10707 * Begin a special stack push (used by interrupts, exceptions and such).
10708 *
10709 * This will raise \#SS or \#PF if appropriate.
10710 *
10711 * @returns Strict VBox status code.
10712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10713 * @param cbMem The number of bytes to push onto the stack.
10714 * @param ppvMem Where to return the pointer to the stack memory.
10715 * As with the other memory functions this could be
10716 * direct access or bounce buffered access, so
10717 * don't commit registers until the commit call
10718 * succeeds.
10719 * @param puNewRsp Where to return the new RSP value. This must be
10720 * passed unchanged to
10721 * iemMemStackPushCommitSpecial().
10722 */
10723IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10724{
10725 Assert(cbMem < UINT8_MAX);
10726 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10727 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10728}
10729
10730
10731/**
10732 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10733 *
10734 * This will update the rSP.
10735 *
10736 * @returns Strict VBox status code.
10737 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10738 * @param pvMem The pointer returned by
10739 * iemMemStackPushBeginSpecial().
10740 * @param uNewRsp The new RSP value returned by
10741 * iemMemStackPushBeginSpecial().
10742 */
10743IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10744{
10745 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10746 if (rcStrict == VINF_SUCCESS)
10747 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10748 return rcStrict;
10749}
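/*
 * Usage sketch (illustrative only, not code from this file): pushing a three
 * dword frame the special way.  The frame layout and the surrounding
 * exception-dispatch context are assumptions; real callers live in
 * IEMAllCImpl.cpp.h.  Note that iemMemStackPushCommitSpecial() only updates
 * RSP when the commit itself succeeds.
 *
 *     uint32_t    *pau32Frame;
 *     uint64_t     uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
 *                                                         (void **)&pau32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     pau32Frame[2] = pVCpu->cpum.GstCtx.eflags.u;
 *     pau32Frame[1] = pVCpu->cpum.GstCtx.cs.Sel;
 *     pau32Frame[0] = (uint32_t)pVCpu->cpum.GstCtx.rip;
 *     rcStrict = iemMemStackPushCommitSpecial(pVCpu, pau32Frame, uNewRsp);
 */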
10750
10751
10752/**
10753 * Begin a special stack pop (used by iret, retf and such).
10754 *
10755 * This will raise \#SS or \#PF if appropriate.
10756 *
10757 * @returns Strict VBox status code.
10758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10759 * @param cbMem The number of bytes to pop from the stack.
10760 * @param ppvMem Where to return the pointer to the stack memory.
10761 * @param puNewRsp Where to return the new RSP value. This must be
10762 * assigned to CPUMCTX::rsp manually some time
10763 * after iemMemStackPopDoneSpecial() has been
10764 * called.
10765 */
10766IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10767{
10768 Assert(cbMem < UINT8_MAX);
10769 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10770 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10771}
10772
10773
10774/**
10775 * Continue a special stack pop (used by iret and retf).
10776 *
10777 * This will raise \#SS or \#PF if appropriate.
10778 *
10779 * @returns Strict VBox status code.
10780 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10781 * @param cbMem The number of bytes to pop from the stack.
10782 * @param ppvMem Where to return the pointer to the stack memory.
10783 * @param puNewRsp Where to return the new RSP value. This must be
10784 * assigned to CPUMCTX::rsp manually some time
10785 * after iemMemStackPopDoneSpecial() has been
10786 * called.
10787 */
10788IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10789{
10790 Assert(cbMem < UINT8_MAX);
10791 RTUINT64U NewRsp;
10792 NewRsp.u = *puNewRsp;
10793 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10794 *puNewRsp = NewRsp.u;
10795 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10796}
10797
10798
10799/**
10800 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10801 * iemMemStackPopContinueSpecial).
10802 *
10803 * The caller will manually commit the rSP.
10804 *
10805 * @returns Strict VBox status code.
10806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10807 * @param pvMem The pointer returned by
10808 * iemMemStackPopBeginSpecial() or
10809 * iemMemStackPopContinueSpecial().
10810 */
10811IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10812{
10813 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10814}
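/*
 * Usage sketch (illustrative only, hypothetical iret-like fragment): the
 * special pop protocol is begin, read, done, validate, and only then commit
 * RSP by hand.  The locals below are made up for the example.
 *
 *     uint32_t const *pau32Frame;
 *     uint64_t        uNewRsp;
 *     VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
 *                                                           (void const **)&pau32Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint32_t const uNewEip   = pau32Frame[0];
 *     uint16_t const uNewCs    = (uint16_t)pau32Frame[1];
 *     uint32_t const uNewFlags = pau32Frame[2];
 *     rcStrict = iemMemStackPopDoneSpecial(pVCpu, pau32Frame);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     ... validate uNewCs and friends, and only then: pVCpu->cpum.GstCtx.rsp = uNewRsp; ...
 */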
10815
10816
10817/**
10818 * Fetches a system table byte.
10819 *
10820 * @returns Strict VBox status code.
10821 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10822 * @param pbDst Where to return the byte.
10823 * @param iSegReg The index of the segment register to use for
10824 * this access. The base and limits are checked.
10825 * @param GCPtrMem The address of the guest memory.
10826 */
10827IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10828{
10829 /* The lazy approach for now... */
10830 uint8_t const *pbSrc;
10831 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10832 if (rc == VINF_SUCCESS)
10833 {
10834 *pbDst = *pbSrc;
10835 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10836 }
10837 return rc;
10838}
10839
10840
10841/**
10842 * Fetches a system table word.
10843 *
10844 * @returns Strict VBox status code.
10845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10846 * @param pu16Dst Where to return the word.
10847 * @param iSegReg The index of the segment register to use for
10848 * this access. The base and limits are checked.
10849 * @param GCPtrMem The address of the guest memory.
10850 */
10851IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10852{
10853 /* The lazy approach for now... */
10854 uint16_t const *pu16Src;
10855 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10856 if (rc == VINF_SUCCESS)
10857 {
10858 *pu16Dst = *pu16Src;
10859 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10860 }
10861 return rc;
10862}
10863
10864
10865/**
10866 * Fetches a system table dword.
10867 *
10868 * @returns Strict VBox status code.
10869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10870 * @param pu32Dst Where to return the dword.
10871 * @param iSegReg The index of the segment register to use for
10872 * this access. The base and limits are checked.
10873 * @param GCPtrMem The address of the guest memory.
10874 */
10875IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10876{
10877 /* The lazy approach for now... */
10878 uint32_t const *pu32Src;
10879 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10880 if (rc == VINF_SUCCESS)
10881 {
10882 *pu32Dst = *pu32Src;
10883 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10884 }
10885 return rc;
10886}
10887
10888
10889/**
10890 * Fetches a system table qword.
10891 *
10892 * @returns Strict VBox status code.
10893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10894 * @param pu64Dst Where to return the qword.
10895 * @param iSegReg The index of the segment register to use for
10896 * this access. The base and limits are checked.
10897 * @param GCPtrMem The address of the guest memory.
10898 */
10899IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10900{
10901 /* The lazy approach for now... */
10902 uint64_t const *pu64Src;
10903 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10904 if (rc == VINF_SUCCESS)
10905 {
10906 *pu64Dst = *pu64Src;
10907 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10908 }
10909 return rc;
10910}
10911
10912
10913/**
10914 * Fetches a descriptor table entry with caller specified error code.
10915 *
10916 * @returns Strict VBox status code.
10917 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10918 * @param pDesc Where to return the descriptor table entry.
10919 * @param uSel The selector which table entry to fetch.
10920 * @param uXcpt The exception to raise on table lookup error.
10921 * @param uErrorCode The error code associated with the exception.
10922 */
10923IEM_STATIC VBOXSTRICTRC
10924iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10925{
10926 AssertPtr(pDesc);
10927 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10928
10929 /** @todo did the 286 require all 8 bytes to be accessible? */
10930 /*
10931 * Get the selector table base and check bounds.
10932 */
10933 RTGCPTR GCPtrBase;
10934 if (uSel & X86_SEL_LDT)
10935 {
10936 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10937 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10938 {
10939 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10940 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10941 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10942 uErrorCode, 0);
10943 }
10944
10945 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10946 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10947 }
10948 else
10949 {
10950 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10951 {
10952 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10953 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10954 uErrorCode, 0);
10955 }
10956 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10957 }
10958
10959 /*
10960 * Read the legacy descriptor and maybe the long mode extensions if
10961 * required.
10962 */
10963 VBOXSTRICTRC rcStrict;
10964 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10965 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10966 else
10967 {
10968 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10969 if (rcStrict == VINF_SUCCESS)
10970 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10971 if (rcStrict == VINF_SUCCESS)
10972 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10973 if (rcStrict == VINF_SUCCESS)
10974 pDesc->Legacy.au16[3] = 0;
10975 else
10976 return rcStrict;
10977 }
10978
10979 if (rcStrict == VINF_SUCCESS)
10980 {
10981 if ( !IEM_IS_LONG_MODE(pVCpu)
10982 || pDesc->Legacy.Gen.u1DescType)
10983 pDesc->Long.au64[1] = 0;
10984 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10985 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10986 else
10987 {
10988 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10989 /** @todo is this the right exception? */
10990 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10991 }
10992 }
10993 return rcStrict;
10994}
10995
10996
10997/**
10998 * Fetches a descriptor table entry.
10999 *
11000 * @returns Strict VBox status code.
11001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11002 * @param pDesc Where to return the descriptor table entry.
11003 * @param uSel The selector which table entry to fetch.
11004 * @param uXcpt The exception to raise on table lookup error.
11005 */
11006IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11007{
11008 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11009}
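/*
 * Usage sketch (illustrative only, assumed selector-check fragment): fetching
 * a descriptor and checking the present bit.  uSel and the exception handling
 * shown here are assumptions, not code from this file.
 *
 *     IEMSELDESC   Desc;
 *     VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     if (!Desc.Legacy.Gen.u1Present)
 *         ... raise the not-present / stack fault exception appropriate for uSel ...
 */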
11010
11011
11012/**
11013 * Fakes a long mode stack selector for SS = 0.
11014 *
11015 * @param pDescSs Where to return the fake stack descriptor.
11016 * @param uDpl The DPL we want.
11017 */
11018IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11019{
11020 pDescSs->Long.au64[0] = 0;
11021 pDescSs->Long.au64[1] = 0;
11022 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11023 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11024 pDescSs->Long.Gen.u2Dpl = uDpl;
11025 pDescSs->Long.Gen.u1Present = 1;
11026 pDescSs->Long.Gen.u1Long = 1;
11027}
11028
11029
11030/**
11031 * Marks the selector descriptor as accessed (only non-system descriptors).
11032 *
11033 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11034 * will therefore skip the limit checks.
11035 *
11036 * @returns Strict VBox status code.
11037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11038 * @param uSel The selector.
11039 */
11040IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11041{
11042 /*
11043 * Get the selector table base and calculate the entry address.
11044 */
11045 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11046 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11047 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11048 GCPtr += uSel & X86_SEL_MASK;
11049
11050 /*
11051 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11052 * ugly stuff to avoid this. This will make sure it's an atomic access
11053 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11054 */
11055 VBOXSTRICTRC rcStrict;
11056 uint32_t volatile *pu32;
11057 if ((GCPtr & 3) == 0)
11058 {
11059        /* The normal case: map the dword containing the accessed bit (bit 40). */
11060 GCPtr += 2 + 2;
11061 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11062 if (rcStrict != VINF_SUCCESS)
11063 return rcStrict;
11064        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11065 }
11066 else
11067 {
11068 /* The misaligned GDT/LDT case, map the whole thing. */
11069 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11070 if (rcStrict != VINF_SUCCESS)
11071 return rcStrict;
11072 switch ((uintptr_t)pu32 & 3)
11073 {
11074 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11075 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11076 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11077 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11078 }
11079 }
11080
11081 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11082}
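/*
 * Usage sketch (illustrative only, assumed segment-load fragment): the
 * accessed bit is typically set right after the descriptor has been fetched
 * and validated, before the segment register itself is loaded.
 *
 *     if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *     {
 *         rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
 *         if (rcStrict != VINF_SUCCESS)
 *             return rcStrict;
 *         Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
 *     }
 */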
11083
11084/** @} */
11085
11086
11087/*
11088 * Include the C/C++ implementation of the instructions.
11089 */
11090#include "IEMAllCImpl.cpp.h"
11091
11092
11093
11094/** @name "Microcode" macros.
11095 *
11096 * The idea is that we should be able to use the same code to interpret
11097 * instructions as well as to recompile them.  Thus this obfuscation.
11098 *
11099 * @{
11100 */
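/*
 * Usage sketch (illustrative only, a hypothetical opcode body that is not
 * built): an instruction implementation typically expands into a block along
 * these lines, which can be interpreted directly today and fed to a
 * recompiler later.  X86_GREG_xAX / X86_GREG_xCX are the usual register
 * index constants.
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *     IEM_MC_STORE_GREG_U16(X86_GREG_xCX, u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 *     return VINF_SUCCESS;
 */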
11101#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11102#define IEM_MC_END() }
11103#define IEM_MC_PAUSE() do {} while (0)
11104#define IEM_MC_CONTINUE() do {} while (0)
11105
11106/** Internal macro. */
11107#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11108 do \
11109 { \
11110 VBOXSTRICTRC rcStrict2 = a_Expr; \
11111 if (rcStrict2 != VINF_SUCCESS) \
11112 return rcStrict2; \
11113 } while (0)
11114
11115
11116#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11117#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11118#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11119#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11120#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11121#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11122#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11123#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11124#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11125 do { \
11126 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11127 return iemRaiseDeviceNotAvailable(pVCpu); \
11128 } while (0)
11129#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11130 do { \
11131 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11132 return iemRaiseDeviceNotAvailable(pVCpu); \
11133 } while (0)
11134#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11135 do { \
11136 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11137 return iemRaiseMathFault(pVCpu); \
11138 } while (0)
11139#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11140 do { \
11141 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11142 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11143 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11144 return iemRaiseUndefinedOpcode(pVCpu); \
11145 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11146 return iemRaiseDeviceNotAvailable(pVCpu); \
11147 } while (0)
11148#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11149 do { \
11150 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11151 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11152 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11153 return iemRaiseUndefinedOpcode(pVCpu); \
11154 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11155 return iemRaiseDeviceNotAvailable(pVCpu); \
11156 } while (0)
11157#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11158 do { \
11159 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11160 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11161 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11162 return iemRaiseUndefinedOpcode(pVCpu); \
11163 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11164 return iemRaiseDeviceNotAvailable(pVCpu); \
11165 } while (0)
11166#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11167 do { \
11168 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11169 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11170 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11171 return iemRaiseUndefinedOpcode(pVCpu); \
11172 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11173 return iemRaiseDeviceNotAvailable(pVCpu); \
11174 } while (0)
11175#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11176 do { \
11177 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11178 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11179 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11180 return iemRaiseUndefinedOpcode(pVCpu); \
11181 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11182 return iemRaiseDeviceNotAvailable(pVCpu); \
11183 } while (0)
11184#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11185 do { \
11186 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11187 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11188 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11189 return iemRaiseUndefinedOpcode(pVCpu); \
11190 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11191 return iemRaiseDeviceNotAvailable(pVCpu); \
11192 } while (0)
11193#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11194 do { \
11195 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11196 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11197 return iemRaiseUndefinedOpcode(pVCpu); \
11198 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11199 return iemRaiseDeviceNotAvailable(pVCpu); \
11200 } while (0)
11201#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11202 do { \
11203 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11204 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11205 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11206 return iemRaiseUndefinedOpcode(pVCpu); \
11207 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11208 return iemRaiseDeviceNotAvailable(pVCpu); \
11209 } while (0)
11210#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11211 do { \
11212 if (pVCpu->iem.s.uCpl != 0) \
11213 return iemRaiseGeneralProtectionFault0(pVCpu); \
11214 } while (0)
11215#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11216 do { \
11217 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11218 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11219 } while (0)
11220#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11221 do { \
11222 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11223 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11224 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11225 return iemRaiseUndefinedOpcode(pVCpu); \
11226 } while (0)
11227#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11228 do { \
11229 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11230 return iemRaiseGeneralProtectionFault0(pVCpu); \
11231 } while (0)
11232
11233
11234#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11235#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11236#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11237#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11238#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11239#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11240#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11241 uint32_t a_Name; \
11242 uint32_t *a_pName = &a_Name
11243#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11244 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11245
11246#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11247#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11248
11249#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11250#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11251#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11252#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11253#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11254#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11255#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11256#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11257#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11258#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11259#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11260#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11261#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11262#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11263#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11264#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11265#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11266#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11267 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11268 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11269 } while (0)
11270#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11271 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11272 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11273 } while (0)
11274#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11275 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11276 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11277 } while (0)
11278/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11279#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11280 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11281 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11282 } while (0)
11283#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11284 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11285 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11286 } while (0)
11287/** @note Not for IOPL or IF testing or modification. */
11288#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11289#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11290#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11291#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11292
11293#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11294#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11295#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11296#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11297#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11298#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11299#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11300#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11301#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11302#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11303/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11304#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11305 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11306 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11307 } while (0)
11308#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11309 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11310 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11311 } while (0)
11312#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11313 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11314
11315
11316#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11317#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11318/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11319 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11320#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11321#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11322/** @note Not for IOPL or IF testing or modification. */
11323#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11324
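/*
 * Illustrative note (sketch only, worker call assumed): per the @todo above,
 * a 32-bit register reference does not clear the upper half by itself, so a
 * caller is expected to pair it with IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF:
 *
 *     IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xAX);
 *     ... let the worker update *pu32Dst ...
 *     IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);
 */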
11325#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11326#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11327#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11328 do { \
11329 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11330 *pu32Reg += (a_u32Value); \
11331        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11332 } while (0)
11333#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11334
11335#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11336#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11337#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11338 do { \
11339 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11340 *pu32Reg -= (a_u32Value); \
11341        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11342 } while (0)
11343#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11344#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11345
11346#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11347#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11348#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11349#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11350#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11351#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11352#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11353
11354#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11355#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11356#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11357#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11358
11359#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11360#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11361#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11362
11363#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11364#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11365#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11366
11367#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11368#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11369#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11370
11371#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11372#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11373#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11374
11375#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11376
11377#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11378
11379#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11380#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11381#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11382 do { \
11383 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11384 *pu32Reg &= (a_u32Value); \
11385        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11386 } while (0)
11387#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11388
11389#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11390#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11391#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11392 do { \
11393 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11394 *pu32Reg |= (a_u32Value); \
11395        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11396 } while (0)
11397#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11398
11399
11400/** @note Not for IOPL or IF modification. */
11401#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11402/** @note Not for IOPL or IF modification. */
11403#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11404/** @note Not for IOPL or IF modification. */
11405#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11406
11407#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11408
11409/** Switches the FPU state to MMX mode (FSW.TOS=0, all tags valid, i.e. abridged FTW=0xff) if necessary. */
11410#define IEM_MC_FPU_TO_MMX_MODE() do { \
11411 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11412 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11413 } while (0)
11414
11415/** Switches the FPU state out of MMX mode, marking all registers empty (abridged FTW=0). */
11416#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11417 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11418 } while (0)
11419
11420#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11421 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11422#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11423 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11424#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11425 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11426 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11427 } while (0)
11428#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11429 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11430 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11431 } while (0)
11432#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11433 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11434#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11435 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11436#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11437 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11438
11439#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11440 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11441 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11442 } while (0)
11443#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11444 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11445#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11446 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11447#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11448 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11449#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11450 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11451 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11452 } while (0)
11453#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11454 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11455#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11456 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11457 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11458 } while (0)
11459#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11460 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11461#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11462 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11463 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11464 } while (0)
11465#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11466 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11467#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11468 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11469#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11470 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11471#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11472 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11473#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11474 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11475 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11476 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11477 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11478 } while (0)
11479
11480#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11481 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11482 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11483 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11484 } while (0)
11485#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11486 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11487 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11488 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11489 } while (0)
11490#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11491 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11492 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11493 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11494 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11495 } while (0)
11496#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11497 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11498 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11499 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11500 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11501 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11502 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11503 } while (0)
11504
11505#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11506#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11507 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11508 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11509 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11510 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11511 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11512 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11513 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11514 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11515 } while (0)
11516#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11517 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11518 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11519 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11520 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11521 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11522 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11523 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11524 } while (0)
11525#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11526 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11527 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11528 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11529 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11530 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11531 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11532 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11533 } while (0)
11534#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11535 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11536 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11537 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11538 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11539 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11540 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11541 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11542 } while (0)
11543
11544#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11545 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11546#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11547 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11548#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11549 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11550#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11551 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11552 uintptr_t const iYRegTmp = (a_iYReg); \
11553 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11554 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11555 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11556 } while (0)
11557
11558#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11559 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11560 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11561 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11562 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11563 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11564 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11565 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11566 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11567 } while (0)
11568#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11569 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11570 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11571 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11572 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11573 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11574 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11575 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11576 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11577 } while (0)
11578#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11579 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11580 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11581 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11582 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11583 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11584 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11585 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11586 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11587 } while (0)
11588
11589#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11590 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11591 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11592 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11593 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11594 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11595 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11596 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11597 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11598 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11599 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11600 } while (0)
11601#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11602 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11603 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11604 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11605 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11606 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11607 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11608 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11609 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11610 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11611 } while (0)
11612#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11613 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11614 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11615 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11616 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11617 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11618 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11619 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11620 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11621 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11622 } while (0)
11623#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11624 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11625 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11626 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11627 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11628 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11629 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11630 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11631 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11632 } while (0)
11633
11634#ifndef IEM_WITH_SETJMP
11635# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11636 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11637# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11638 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11639# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11640 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11641#else
11642# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11643 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11644# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11645 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11646# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11647 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11648#endif
11649
11650#ifndef IEM_WITH_SETJMP
11651# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11652 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11653# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11654 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11655# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11656 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11657#else
11658# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11659 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11660# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11661 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11662# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11663 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11664#endif
11665
11666#ifndef IEM_WITH_SETJMP
11667# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11668 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11669# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11670 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11671# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11672 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11673#else
11674# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11675 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11676# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11677 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11678# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11679 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11680#endif
11681
11682#ifdef SOME_UNUSED_FUNCTION
11683# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11684 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11685#endif
11686
11687#ifndef IEM_WITH_SETJMP
11688# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11689 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11690# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11691 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11692# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11694# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11696#else
11697# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11698 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11699# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11700 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11701# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11702 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11703# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11704 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11705#endif
11706
11707#ifndef IEM_WITH_SETJMP
11708# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11709 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11710# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11711 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11712# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11713 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11714#else
11715# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11716 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11717# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11718 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11719# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11720 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11721#endif
11722
11723#ifndef IEM_WITH_SETJMP
11724# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11725 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11726# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11727 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11728#else
11729# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11730 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11731# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11732 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11733#endif
11734
11735#ifndef IEM_WITH_SETJMP
11736# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11737 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11738# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11740#else
11741# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11742 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11743# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11744 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11745#endif
11746
11747
11748
11749#ifndef IEM_WITH_SETJMP
11750# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11751 do { \
11752 uint8_t u8Tmp; \
11753 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11754 (a_u16Dst) = u8Tmp; \
11755 } while (0)
11756# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11757 do { \
11758 uint8_t u8Tmp; \
11759 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11760 (a_u32Dst) = u8Tmp; \
11761 } while (0)
11762# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11763 do { \
11764 uint8_t u8Tmp; \
11765 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11766 (a_u64Dst) = u8Tmp; \
11767 } while (0)
11768# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11769 do { \
11770 uint16_t u16Tmp; \
11771 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11772 (a_u32Dst) = u16Tmp; \
11773 } while (0)
11774# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11775 do { \
11776 uint16_t u16Tmp; \
11777 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11778 (a_u64Dst) = u16Tmp; \
11779 } while (0)
11780# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11781 do { \
11782 uint32_t u32Tmp; \
11783 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11784 (a_u64Dst) = u32Tmp; \
11785 } while (0)
11786#else /* IEM_WITH_SETJMP */
11787# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11788 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11789# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11790 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11791# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11792 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11793# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11794 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11795# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11796 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11797# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11798 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11799#endif /* IEM_WITH_SETJMP */
11800
11801#ifndef IEM_WITH_SETJMP
11802# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11803 do { \
11804 uint8_t u8Tmp; \
11805 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11806 (a_u16Dst) = (int8_t)u8Tmp; \
11807 } while (0)
11808# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11809 do { \
11810 uint8_t u8Tmp; \
11811 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11812 (a_u32Dst) = (int8_t)u8Tmp; \
11813 } while (0)
11814# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11815 do { \
11816 uint8_t u8Tmp; \
11817 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11818 (a_u64Dst) = (int8_t)u8Tmp; \
11819 } while (0)
11820# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11821 do { \
11822 uint16_t u16Tmp; \
11823 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11824 (a_u32Dst) = (int16_t)u16Tmp; \
11825 } while (0)
11826# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11827 do { \
11828 uint16_t u16Tmp; \
11829 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11830 (a_u64Dst) = (int16_t)u16Tmp; \
11831 } while (0)
11832# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11833 do { \
11834 uint32_t u32Tmp; \
11835 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11836 (a_u64Dst) = (int32_t)u32Tmp; \
11837 } while (0)
11838#else /* IEM_WITH_SETJMP */
11839# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11840 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11841# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11842 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11843# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11844 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11845# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11846 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11847# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11848 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11849# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11850 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11851#endif /* IEM_WITH_SETJMP */
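/*
 * Illustrative sketch (not part of the original sources): a typical user of the
 * zero- and sign-extending fetch macros above, roughly what a MOVZX/MOVSX style
 * memory form looks like.  IEM_MC_BEGIN, IEM_MC_LOCAL, IEM_MC_STORE_GREG_U32 and
 * IEM_MC_ADVANCE_RIP are assumed from earlier in this file, and
 * IEM_MC_CALC_RM_EFF_ADDR is defined further down.
 */
#if 0 /* example only, not compiled */
        IEM_MC_BEGIN(0, 2);
        IEM_MC_LOCAL(uint32_t, u32Dst);
        IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
        /* movzx r32, byte [mem]: zero extend the 8-bit memory operand... */
        IEM_MC_FETCH_MEM_U8_ZX_U32(u32Dst, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
        /* ...whereas movsx would use IEM_MC_FETCH_MEM_U8_SX_U32 instead. */
        IEM_MC_STORE_GREG_U32(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u32Dst);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif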
11852
11853#ifndef IEM_WITH_SETJMP
11854# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11855 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11856# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11857 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11858# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11859 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11860# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11861 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11862#else
11863# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11864 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11865# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11866 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11867# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11868 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11869# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11870 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11871#endif
11872
11873#ifndef IEM_WITH_SETJMP
11874# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11875 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11876# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11877 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11878# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11879 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11880# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11881 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11882#else
11883# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11884 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11885# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11886 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11887# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11888 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11889# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11890 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11891#endif
11892
11893#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11894#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11895#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11896#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11897#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11898#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11899#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11900 do { \
11901 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11902 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11903 } while (0)
11904
11905#ifndef IEM_WITH_SETJMP
11906# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11907 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11908# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11909 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11910#else
11911# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11912 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11913# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11914 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11915#endif
11916
11917#ifndef IEM_WITH_SETJMP
11918# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11919 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11920# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11921 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11922#else
11923# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11924 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11925# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11926 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11927#endif
11928
11929
11930#define IEM_MC_PUSH_U16(a_u16Value) \
11931 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11932#define IEM_MC_PUSH_U32(a_u32Value) \
11933 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11934#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11935 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11936#define IEM_MC_PUSH_U64(a_u64Value) \
11937 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11938
11939#define IEM_MC_POP_U16(a_pu16Value) \
11940 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11941#define IEM_MC_POP_U32(a_pu32Value) \
11942 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11943#define IEM_MC_POP_U64(a_pu64Value) \
11944 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11945
11946/** Maps guest memory for direct or bounce buffered access.
11947 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11948 * @remarks May return.
11949 */
11950#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11951 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11952
11953/** Maps guest memory for direct or bounce buffered access.
11954 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11955 * @remarks May return.
11956 */
11957#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11958 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11959
11960/** Commits the memory and unmaps the guest memory.
11961 * @remarks May return.
11962 */
11963#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11964 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
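/*
 * Illustrative sketch (not part of the original sources): the usual
 * map / modify / commit sequence for a read-modify-write memory operand.
 * IEM_MC_BEGIN, IEM_MC_ARG, IEM_MC_ARG_LOCAL_EFLAGS, IEM_MC_COMMIT_EFLAGS and
 * IEM_ACCESS_DATA_RW are assumed from elsewhere in the sources, and
 * iemAImpl_example_rmw_u32 is a hypothetical worker name.
 */
#if 0 /* example only, not compiled */
        IEM_MC_BEGIN(2, 2);
        IEM_MC_ARG(uint32_t *,   pu32Dst,         0);
        IEM_MC_ARG_LOCAL_EFLAGS( pEFlags, EFlags, 1);
        IEM_MC_LOCAL(RTGCPTR, GCPtrEffDst);
        IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
        /* Map the operand read+write, let the worker update it, then commit. */
        IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0 /*arg*/);
        IEM_MC_CALL_VOID_AIMPL_2(iemAImpl_example_rmw_u32, pu32Dst, pEFlags);
        IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
        IEM_MC_COMMIT_EFLAGS(EFlags);
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif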
11965
11966/** Commits the memory and unmaps the guest memory unless the FPU status word
11967 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11968 * that would cause the FPU store instruction not to store anything.
11969 *
11970 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11971 * store, while \#P will not.
11972 *
11973 * @remarks May in theory return - for now.
11974 */
11975#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11976 do { \
11977 if ( !(a_u16FSW & X86_FSW_ES) \
11978 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11979 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11980 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11981 } while (0)
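/*
 * Illustrative sketch (not part of the original sources): the condition tested
 * by IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE, written out as a plain helper.
 * The commit is skipped only when the FSW summary bit (ES) is set and one of
 * IE/OE/UE is both pending and unmasked by the FCW; a masked or precision-only
 * (\#P) exception does not suppress the store.
 */
#if 0 /* example only, not compiled */
static bool iemExampleFpuStoreWouldCommit(uint16_t u16FSW, uint16_t u16FCW)
{
    uint16_t const fUnmaskedPending = (u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE))
                                    & ~(u16FCW & X86_FCW_MASK_ALL);
    return !(u16FSW & X86_FSW_ES)
        || !fUnmaskedPending;
}
#endif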
11982
11983/** Calculate efficient address from R/M. */
11984#ifndef IEM_WITH_SETJMP
11985# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11986 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11987#else
11988# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11989 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11990#endif
11991
11992#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11993#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11994#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11995#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11996#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11997#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11998#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11999
12000/**
12001 * Defers the rest of the instruction emulation to a C implementation routine
12002 * and returns, only taking the standard parameters.
12003 *
12004 * @param a_pfnCImpl The pointer to the C routine.
12005 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12006 */
12007#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12008
12009/**
12010 * Defers the rest of instruction emulation to a C implementation routine and
12011 * returns, taking one argument in addition to the standard ones.
12012 *
12013 * @param a_pfnCImpl The pointer to the C routine.
12014 * @param a0 The argument.
12015 */
12016#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12017
12018/**
12019 * Defers the rest of the instruction emulation to a C implementation routine
12020 * and returns, taking two arguments in addition to the standard ones.
12021 *
12022 * @param a_pfnCImpl The pointer to the C routine.
12023 * @param a0 The first extra argument.
12024 * @param a1 The second extra argument.
12025 */
12026#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12027
12028/**
12029 * Defers the rest of the instruction emulation to a C implementation routine
12030 * and returns, taking three arguments in addition to the standard ones.
12031 *
12032 * @param a_pfnCImpl The pointer to the C routine.
12033 * @param a0 The first extra argument.
12034 * @param a1 The second extra argument.
12035 * @param a2 The third extra argument.
12036 */
12037#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12038
12039/**
12040 * Defers the rest of the instruction emulation to a C implementation routine
12041 * and returns, taking four arguments in addition to the standard ones.
12042 *
12043 * @param a_pfnCImpl The pointer to the C routine.
12044 * @param a0 The first extra argument.
12045 * @param a1 The second extra argument.
12046 * @param a2 The third extra argument.
12047 * @param a3 The fourth extra argument.
12048 */
12049#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12050
12051/**
12052 * Defers the rest of the instruction emulation to a C implementation routine
12053 * and returns, taking five arguments in addition to the standard ones.
12054 *
12055 * @param a_pfnCImpl The pointer to the C routine.
12056 * @param a0 The first extra argument.
12057 * @param a1 The second extra argument.
12058 * @param a2 The third extra argument.
12059 * @param a3 The fourth extra argument.
12060 * @param a4 The fifth extra argument.
12061 */
12062#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
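/*
 * Illustrative sketch (not part of the original sources): a decoder body that
 * hands the rest of the work to a C implementation routine.  IEM_MC_BEGIN,
 * IEM_MC_END, IEM_MC_ARG_CONST and IEM_MC_FETCH_GREG_U16 are assumed from
 * earlier in this file; iemCImpl_example_load_sreg is a hypothetical worker.
 */
#if 0 /* example only, not compiled */
        IEM_MC_BEGIN(2, 0);
        IEM_MC_ARG_CONST(uint8_t, iSRegArg, X86_SREG_DS, 0);
        IEM_MC_ARG(uint16_t,      u16Value,              1);
        IEM_MC_FETCH_GREG_U16(u16Value, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB);
        /* IEM_MC_CALL_CIMPL_2 expands to a 'return', so only IEM_MC_END may follow it. */
        IEM_MC_CALL_CIMPL_2(iemCImpl_example_load_sreg, iSRegArg, u16Value);
        IEM_MC_END();
#endif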
12063
12064/**
12065 * Defers the entire instruction emulation to a C implementation routine and
12066 * returns, only taking the standard parameters.
12067 *
12068 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12069 *
12070 * @param a_pfnCImpl The pointer to the C routine.
12071 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12072 */
12073#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12074
12075/**
12076 * Defers the entire instruction emulation to a C implementation routine and
12077 * returns, taking one argument in addition to the standard ones.
12078 *
12079 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12080 *
12081 * @param a_pfnCImpl The pointer to the C routine.
12082 * @param a0 The argument.
12083 */
12084#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12085
12086/**
12087 * Defers the entire instruction emulation to a C implementation routine and
12088 * returns, taking two arguments in addition to the standard ones.
12089 *
12090 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12091 *
12092 * @param a_pfnCImpl The pointer to the C routine.
12093 * @param a0 The first extra argument.
12094 * @param a1 The second extra argument.
12095 */
12096#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12097
12098/**
12099 * Defers the entire instruction emulation to a C implementation routine and
12100 * returns, taking three arguments in addition to the standard ones.
12101 *
12102 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12103 *
12104 * @param a_pfnCImpl The pointer to the C routine.
12105 * @param a0 The first extra argument.
12106 * @param a1 The second extra argument.
12107 * @param a2 The third extra argument.
12108 */
12109#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
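/*
 * Illustrative sketch (not part of the original sources): deferring the whole
 * instruction, which, unlike IEM_MC_CALL_CIMPL_N, is used bare without an
 * IEM_MC_BEGIN/IEM_MC_END pair.  FNIEMOP_DEF is assumed from elsewhere in the
 * sources and iemCImpl_example_hlt is a hypothetical worker name.
 */
#if 0 /* example only, not compiled */
FNIEMOP_DEF(iemOp_example_hlt)
{
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX(); /* defined further down in this file */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_example_hlt);
}
#endif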
12110
12111/**
12112 * Calls a FPU assembly implementation taking one visible argument.
12113 *
12114 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12115 * @param a0 The first extra argument.
12116 */
12117#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12118 do { \
12119 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12120 } while (0)
12121
12122/**
12123 * Calls a FPU assembly implementation taking two visible arguments.
12124 *
12125 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12126 * @param a0 The first extra argument.
12127 * @param a1 The second extra argument.
12128 */
12129#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12130 do { \
12131 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12132 } while (0)
12133
12134/**
12135 * Calls a FPU assembly implementation taking three visible arguments.
12136 *
12137 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12138 * @param a0 The first extra argument.
12139 * @param a1 The second extra argument.
12140 * @param a2 The third extra argument.
12141 */
12142#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12143 do { \
12144 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12145 } while (0)
12146
12147#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12148 do { \
12149 (a_FpuData).FSW = (a_FSW); \
12150 (a_FpuData).r80Result = *(a_pr80Value); \
12151 } while (0)
12152
12153/** Pushes FPU result onto the stack. */
12154#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12155 iemFpuPushResult(pVCpu, &a_FpuData)
12156/** Pushes FPU result onto the stack and sets the FPUDP. */
12157#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12158 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12159
12160/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12161#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12162 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12163
12164/** Stores FPU result in a stack register. */
12165#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12166 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12167/** Stores FPU result in a stack register and pops the stack. */
12168#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12169 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12170/** Stores FPU result in a stack register and sets the FPUDP. */
12171#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12172 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12173/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12174 * stack. */
12175#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12176 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12177
12178/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12179#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12180 iemFpuUpdateOpcodeAndIp(pVCpu)
12181/** Free a stack register (for FFREE and FFREEP). */
12182#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12183 iemFpuStackFree(pVCpu, a_iStReg)
12184/** Increment the FPU stack pointer. */
12185#define IEM_MC_FPU_STACK_INC_TOP() \
12186 iemFpuStackIncTop(pVCpu)
12187/** Decrement the FPU stack pointer. */
12188#define IEM_MC_FPU_STACK_DEC_TOP() \
12189 iemFpuStackDecTop(pVCpu)
12190
12191/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12192#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12193 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12194/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12195#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12196 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12197/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12198#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12199 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12200/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12201#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12202 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12203/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12204 * stack. */
12205#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12206 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12207/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12208#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12209 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12210
12211/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12212#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12213 iemFpuStackUnderflow(pVCpu, a_iStDst)
12214/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12215 * stack. */
12216#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12217 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12218/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12219 * FPUDS. */
12220#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12221 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12222/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12223 * FPUDS. Pops stack. */
12224#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12225 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12226/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12227 * stack twice. */
12228#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12229 iemFpuStackUnderflowThenPopPop(pVCpu)
12230/** Raises a FPU stack underflow exception for an instruction pushing a result
12231 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12232#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12233 iemFpuStackPushUnderflow(pVCpu)
12234/** Raises a FPU stack underflow exception for an instruction pushing a result
12235 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12236#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12237 iemFpuStackPushUnderflowTwo(pVCpu)
12238
12239/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12240 * FPUIP, FPUCS and FOP. */
12241#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12242 iemFpuStackPushOverflow(pVCpu)
12243/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12244 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12245#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12246 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12247/** Prepares for using the FPU state.
12248 * Ensures that we can use the host FPU in the current context (RC+R0).
12249 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12250#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12251/** Actualizes the guest FPU state so it can be accessed in read-only fashion. */
12252#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12253/** Actualizes the guest FPU state so it can be accessed and modified. */
12254#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12255
12256/** Prepares for using the SSE state.
12257 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12258 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12259#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12260/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12261#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12262/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12263#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12264
12265/** Prepares for using the AVX state.
12266 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12267 * Ensures the guest AVX state in the CPUMCTX is up to date.
12268 * @note This will include the AVX512 state too when support for it is added
12269 * due to the zero extending feature of VEX instructions. */
12270#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12271/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12272#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12273/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12274#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12275
12276/**
12277 * Calls a MMX assembly implementation taking two visible arguments.
12278 *
12279 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12280 * @param a0 The first extra argument.
12281 * @param a1 The second extra argument.
12282 */
12283#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12284 do { \
12285 IEM_MC_PREPARE_FPU_USAGE(); \
12286 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12287 } while (0)
12288
12289/**
12290 * Calls a MMX assembly implementation taking three visible arguments.
12291 *
12292 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12293 * @param a0 The first extra argument.
12294 * @param a1 The second extra argument.
12295 * @param a2 The third extra argument.
12296 */
12297#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12298 do { \
12299 IEM_MC_PREPARE_FPU_USAGE(); \
12300 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12301 } while (0)
12302
12303
12304/**
12305 * Calls a SSE assembly implementation taking two visible arguments.
12306 *
12307 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12308 * @param a0 The first extra argument.
12309 * @param a1 The second extra argument.
12310 */
12311#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12312 do { \
12313 IEM_MC_PREPARE_SSE_USAGE(); \
12314 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12315 } while (0)
12316
12317/**
12318 * Calls a SSE assembly implementation taking three visible arguments.
12319 *
12320 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12321 * @param a0 The first extra argument.
12322 * @param a1 The second extra argument.
12323 * @param a2 The third extra argument.
12324 */
12325#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12326 do { \
12327 IEM_MC_PREPARE_SSE_USAGE(); \
12328 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12329 } while (0)
12330
12331
12332/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12333 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12334#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12335 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12336
12337/**
12338 * Calls a AVX assembly implementation taking two visible arguments.
12339 *
12340 * There is one implicit zeroth argument, a pointer to the extended state.
12341 *
12342 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12343 * @param a1 The first extra argument.
12344 * @param a2 The second extra argument.
12345 */
12346#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12347 do { \
12348 IEM_MC_PREPARE_AVX_USAGE(); \
12349 a_pfnAImpl(pXState, (a1), (a2)); \
12350 } while (0)
12351
12352/**
12353 * Calls a AVX assembly implementation taking three visible arguments.
12354 *
12355 * There is one implicit zeroth argument, a pointer to the extended state.
12356 *
12357 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12358 * @param a1 The first extra argument.
12359 * @param a2 The second extra argument.
12360 * @param a3 The third extra argument.
12361 */
12362#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12363 do { \
12364 IEM_MC_PREPARE_AVX_USAGE(); \
12365 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12366 } while (0)
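/*
 * Illustrative sketch (not part of the original sources): how the implicit
 * zeroth argument ties in with IEM_MC_CALL_AVX_AIMPL_2.  IEM_MC_BEGIN,
 * IEM_MC_END and IEM_MC_ARG_CONST are assumed from earlier in this file,
 * iemAImpl_example_vop_u256 is a hypothetical worker, and the usual
 * exception checks are elided.
 */
#if 0 /* example only, not compiled */
        IEM_MC_BEGIN(3, 0);
        IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();   /* declares pXState as argument 0 */
        IEM_MC_ARG_CONST(uint8_t, iYRegDst, ((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, 1);
        IEM_MC_ARG_CONST(uint8_t, iYRegSrc, (bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB,                              2);
        IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_example_vop_u256, iYRegDst, iYRegSrc); /* prepares AVX usage itself */
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif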
12367
12368/** @note Not for IOPL or IF testing. */
12369#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12370/** @note Not for IOPL or IF testing. */
12371#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12372/** @note Not for IOPL or IF testing. */
12373#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12374/** @note Not for IOPL or IF testing. */
12375#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12376/** @note Not for IOPL or IF testing. */
12377#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12378 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12379 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12380/** @note Not for IOPL or IF testing. */
12381#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12382 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12383 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12384/** @note Not for IOPL or IF testing. */
12385#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12386 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12387 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12388 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12389/** @note Not for IOPL or IF testing. */
12390#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12391 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12392 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12393 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12394#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12395#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12396#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12397/** @note Not for IOPL or IF testing. */
12398#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12399 if ( pVCpu->cpum.GstCtx.cx != 0 \
12400 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12401/** @note Not for IOPL or IF testing. */
12402#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12403 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12404 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12405/** @note Not for IOPL or IF testing. */
12406#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12407 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12408 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12409/** @note Not for IOPL or IF testing. */
12410#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12411 if ( pVCpu->cpum.GstCtx.cx != 0 \
12412 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12413/** @note Not for IOPL or IF testing. */
12414#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12415 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12416 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12417/** @note Not for IOPL or IF testing. */
12418#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12419 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12420 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12421#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12422#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12423
12424#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12425 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12426#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12427 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12428#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12429 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12430#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12431 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12432#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12433 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12434#define IEM_MC_IF_FCW_IM() \
12435 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12436
12437#define IEM_MC_ELSE() } else {
12438#define IEM_MC_ENDIF() } do {} while (0)
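/*
 * Illustrative sketch (not part of the original sources): the IEM_MC_IF_* /
 * IEM_MC_ELSE / IEM_MC_ENDIF macros supply the braces themselves, so a SETcc
 * style body is written without any explicit '{' or '}'.  IEM_MC_BEGIN,
 * IEM_MC_END and IEM_MC_STORE_GREG_U8_CONST are assumed from earlier in this
 * file.
 */
#if 0 /* example only, not compiled */
        IEM_MC_BEGIN(0, 0);
        IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 1);
        IEM_MC_ELSE()
            IEM_MC_STORE_GREG_U8_CONST((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB, 0);
        IEM_MC_ENDIF();
        IEM_MC_ADVANCE_RIP();
        IEM_MC_END();
#endif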
12439
12440/** @} */
12441
12442
12443/** @name Opcode Debug Helpers.
12444 * @{
12445 */
12446#ifdef VBOX_WITH_STATISTICS
12447# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12448#else
12449# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12450#endif
12451
12452#ifdef DEBUG
12453# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12454 do { \
12455 IEMOP_INC_STATS(a_Stats); \
12456 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12457 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12458 } while (0)
12459
12460# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12461 do { \
12462 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12463 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12464 (void)RT_CONCAT(OP_,a_Upper); \
12465 (void)(a_fDisHints); \
12466 (void)(a_fIemHints); \
12467 } while (0)
12468
12469# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12470 do { \
12471 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12472 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12473 (void)RT_CONCAT(OP_,a_Upper); \
12474 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12475 (void)(a_fDisHints); \
12476 (void)(a_fIemHints); \
12477 } while (0)
12478
12479# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12480 do { \
12481 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12482 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12483 (void)RT_CONCAT(OP_,a_Upper); \
12484 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12485 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12486 (void)(a_fDisHints); \
12487 (void)(a_fIemHints); \
12488 } while (0)
12489
12490# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12491 do { \
12492 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12493 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12494 (void)RT_CONCAT(OP_,a_Upper); \
12495 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12496 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12497 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12498 (void)(a_fDisHints); \
12499 (void)(a_fIemHints); \
12500 } while (0)
12501
12502# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12503 do { \
12504 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12505 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12506 (void)RT_CONCAT(OP_,a_Upper); \
12507 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12508 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12509 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12510 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12511 (void)(a_fDisHints); \
12512 (void)(a_fIemHints); \
12513 } while (0)
12514
12515#else
12516# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12517
12518# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12519 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12520# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12521 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12522# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12523 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12524# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12525 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12526# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12527 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12528
12529#endif
12530
12531#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12532 IEMOP_MNEMONIC0EX(a_Lower, \
12533 #a_Lower, \
12534 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12535#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12536 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12537 #a_Lower " " #a_Op1, \
12538 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12539#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12540 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12541 #a_Lower " " #a_Op1 "," #a_Op2, \
12542 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12543#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12544 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12545 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12546 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12547#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12548 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12549 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12550 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
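/*
 * Illustrative sketch (not part of the original sources): the mnemonic macros
 * sit at the very top of an opcode decoder, feeding both the statistics
 * counter and the Log4 decode line.  FNIEMOP_DEF and the form/hint constants
 * (IEMOPFORM_RM, DISOPTYPE_HARMLESS) are assumed from elsewhere in the sources.
 */
#if 0 /* example only, not compiled */
FNIEMOP_DEF(iemOp_example_add_Gv_Ev)
{
    IEMOP_MNEMONIC2(RM, ADD, add, Gv, Ev, DISOPTYPE_HARMLESS, 0);
    /* ... fetch the ModR/M byte, decode operands and emit the IEM_MC block ... */
    return VINF_SUCCESS;
}
#endif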
12551
12552/** @} */
12553
12554
12555/** @name Opcode Helpers.
12556 * @{
12557 */
12558
12559#ifdef IN_RING3
12560# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12561 do { \
12562 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12563 else \
12564 { \
12565 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12566 return IEMOP_RAISE_INVALID_OPCODE(); \
12567 } \
12568 } while (0)
12569#else
12570# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12571 do { \
12572 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12573 else return IEMOP_RAISE_INVALID_OPCODE(); \
12574 } while (0)
12575#endif
12576
12577/** The instruction requires a 186 or later. */
12578#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12579# define IEMOP_HLP_MIN_186() do { } while (0)
12580#else
12581# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12582#endif
12583
12584/** The instruction requires a 286 or later. */
12585#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12586# define IEMOP_HLP_MIN_286() do { } while (0)
12587#else
12588# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12589#endif
12590
12591/** The instruction requires a 386 or later. */
12592#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12593# define IEMOP_HLP_MIN_386() do { } while (0)
12594#else
12595# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12596#endif
12597
12598/** The instruction requires a 386 or later if the given expression is true. */
12599#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12600# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12601#else
12602# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12603#endif
12604
12605/** The instruction requires a 486 or later. */
12606#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12607# define IEMOP_HLP_MIN_486() do { } while (0)
12608#else
12609# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12610#endif
12611
12612/** The instruction requires a Pentium (586) or later. */
12613#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12614# define IEMOP_HLP_MIN_586() do { } while (0)
12615#else
12616# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12617#endif
12618
12619/** The instruction requires a PentiumPro (686) or later. */
12620#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12621# define IEMOP_HLP_MIN_686() do { } while (0)
12622#else
12623# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12624#endif
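/*
 * Illustrative sketch (not part of the original sources): the min-CPU helpers
 * go right after the mnemonic macro, before any operand decoding.  FNIEMOP_DEF
 * is assumed from elsewhere in the sources and the stats name is made up.
 */
#if 0 /* example only, not compiled */
FNIEMOP_DEF(iemOp_example_bswap_eAX)
{
    IEMOP_MNEMONIC(bswap_rAX, "bswap rAX");
    IEMOP_HLP_MIN_486();    /* BSWAP first appeared on the 486. */
    /* ... operand decoding and the IEM_MC block follow here ... */
    return VINF_SUCCESS;
}
#endif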
12625
12626
12627/** The instruction raises an \#UD in real and V8086 mode. */
12628#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12629 do \
12630 { \
12631 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12632 else return IEMOP_RAISE_INVALID_OPCODE(); \
12633 } while (0)
12634
12635#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12636/** This instruction raises an \#UD in real and V8086 mode, and in long mode
12637 * when not using a 64-bit code segment (applicable to all VMX instructions
12638 * except VMCALL).
12639 */
12640#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12641 do \
12642 { \
12643 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12644 && ( !IEM_IS_LONG_MODE(pVCpu) \
12645 || IEM_IS_64BIT_CODE(pVCpu))) \
12646 { /* likely */ } \
12647 else \
12648 { \
12649 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12650 { \
12651 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12652 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12653 return IEMOP_RAISE_INVALID_OPCODE(); \
12654 } \
12655 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12656 { \
12657 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12658 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12659 return IEMOP_RAISE_INVALID_OPCODE(); \
12660 } \
12661 } \
12662 } while (0)
12663
12664/** The instruction can only be executed in VMX operation (VMX root mode and
12665 * non-root mode).
12666 *
12667 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12668 */
12669# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12670 do \
12671 { \
12672 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12673 else \
12674 { \
12675 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12676 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12677 return IEMOP_RAISE_INVALID_OPCODE(); \
12678 } \
12679 } while (0)
12680#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12681
12682/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12683 * 64-bit mode. */
12684#define IEMOP_HLP_NO_64BIT() \
12685 do \
12686 { \
12687 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12688 return IEMOP_RAISE_INVALID_OPCODE(); \
12689 } while (0)
12690
12691/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12692 * 64-bit mode. */
12693#define IEMOP_HLP_ONLY_64BIT() \
12694 do \
12695 { \
12696 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12697 return IEMOP_RAISE_INVALID_OPCODE(); \
12698 } while (0)
12699
12700/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12701#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12702 do \
12703 { \
12704 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12705 iemRecalEffOpSize64Default(pVCpu); \
12706 } while (0)
12707
12708/** The instruction has 64-bit operand size if 64-bit mode. */
12709#define IEMOP_HLP_64BIT_OP_SIZE() \
12710 do \
12711 { \
12712 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12713 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12714 } while (0)
12715
12716/** Only a REX prefix immediately preceding the first opcode byte takes
12717 * effect. This macro helps ensure this as well as logging bad guest code. */
12718#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12719 do \
12720 { \
12721 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12722 { \
12723 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12724 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12725 pVCpu->iem.s.uRexB = 0; \
12726 pVCpu->iem.s.uRexIndex = 0; \
12727 pVCpu->iem.s.uRexReg = 0; \
12728 iemRecalEffOpSize(pVCpu); \
12729 } \
12730 } while (0)
12731
12732/**
12733 * Done decoding.
12734 */
12735#define IEMOP_HLP_DONE_DECODING() \
12736 do \
12737 { \
12738 /*nothing for now, maybe later... */ \
12739 } while (0)
12740
12741/**
12742 * Done decoding, raise \#UD exception if lock prefix present.
12743 */
12744#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12745 do \
12746 { \
12747 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12748 { /* likely */ } \
12749 else \
12750 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12751 } while (0)
12752
12753
12754/**
12755 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12756 * repnz or size prefixes are present, or if in real or v8086 mode.
12757 */
12758#define IEMOP_HLP_DONE_VEX_DECODING() \
12759 do \
12760 { \
12761 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12762 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12763 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12764 { /* likely */ } \
12765 else \
12766 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12767 } while (0)
12768
12769/**
12770 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12771 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not 0.
12772 */
12773#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12774 do \
12775 { \
12776 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12777 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12778 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12779 && pVCpu->iem.s.uVexLength == 0)) \
12780 { /* likely */ } \
12781 else \
12782 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12783 } while (0)
12784
12785
12786/**
12787 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12788 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12789 * register 0, or if in real or v8086 mode.
12790 */
12791#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12792 do \
12793 { \
12794 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12795 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12796 && !pVCpu->iem.s.uVex3rdReg \
12797 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12798 { /* likely */ } \
12799 else \
12800 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12801 } while (0)
12802
12803/**
12804 * Done decoding VEX, no V, L=0.
12805 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12806 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12807 */
12808#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12809 do \
12810 { \
12811 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12812 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12813 && pVCpu->iem.s.uVexLength == 0 \
12814 && pVCpu->iem.s.uVex3rdReg == 0 \
12815 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12816 { /* likely */ } \
12817 else \
12818 return IEMOP_RAISE_INVALID_OPCODE(); \
12819 } while (0)
12820
12821#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12822 do \
12823 { \
12824 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12825 { /* likely */ } \
12826 else \
12827 { \
12828 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12829 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12830 } \
12831 } while (0)
12832#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12833 do \
12834 { \
12835 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12836 { /* likely */ } \
12837 else \
12838 { \
12839 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12840 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12841 } \
12842 } while (0)
12843
12844/**
12845 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12846 * are present.
12847 */
12848#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12849 do \
12850 { \
12851 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12852 { /* likely */ } \
12853 else \
12854 return IEMOP_RAISE_INVALID_OPCODE(); \
12855 } while (0)
12856
12857/**
12858 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12859 * prefixes are present.
12860 */
12861#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12862 do \
12863 { \
12864 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12865 { /* likely */ } \
12866 else \
12867 return IEMOP_RAISE_INVALID_OPCODE(); \
12868 } while (0)
12869
12870
12871/**
12872 * Calculates the effective address of a ModR/M memory operand.
12873 *
12874 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12875 *
12876 * @return Strict VBox status code.
12877 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12878 * @param bRm The ModRM byte.
12879 * @param cbImm The size of any immediate following the
12880 * effective address opcode bytes. Important for
12881 * RIP relative addressing.
12882 * @param pGCPtrEff Where to return the effective address.
12883 */
12884IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12885{
12886 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12887# define SET_SS_DEF() \
12888 do \
12889 { \
12890 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12891 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12892 } while (0)
12893
12894 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12895 {
12896/** @todo Check the effective address size crap! */
12897 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12898 {
12899 uint16_t u16EffAddr;
12900
12901 /* Handle the disp16 form with no registers first. */
12902 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12903 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12904 else
12905 {
12906                /* Get the displacement. */
12907 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12908 {
12909 case 0: u16EffAddr = 0; break;
12910 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12911 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12912 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12913 }
12914
12915 /* Add the base and index registers to the disp. */
12916 switch (bRm & X86_MODRM_RM_MASK)
12917 {
12918 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12919 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12920 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12921 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12922 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12923 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12924 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12925 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12926 }
12927 }
12928
12929 *pGCPtrEff = u16EffAddr;
12930 }
12931 else
12932 {
12933 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12934 uint32_t u32EffAddr;
12935
12936 /* Handle the disp32 form with no registers first. */
12937 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12938 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12939 else
12940 {
12941 /* Get the register (or SIB) value. */
12942 switch ((bRm & X86_MODRM_RM_MASK))
12943 {
12944 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12945 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12946 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12947 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12948 case 4: /* SIB */
12949 {
12950 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12951
12952 /* Get the index and scale it. */
12953 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12954 {
12955 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12956 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12957 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12958 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12959 case 4: u32EffAddr = 0; /*none */ break;
12960 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12961 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12962 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12963 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12964 }
12965 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12966
12967 /* add base */
12968 switch (bSib & X86_SIB_BASE_MASK)
12969 {
12970 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12971 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12972 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12973 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12974 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12975 case 5:
12976 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12977 {
12978 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12979 SET_SS_DEF();
12980 }
12981 else
12982 {
12983 uint32_t u32Disp;
12984 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12985 u32EffAddr += u32Disp;
12986 }
12987 break;
12988 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12989 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12990 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12991 }
12992 break;
12993 }
12994 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12995 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12996 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12997 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12998 }
12999
13000 /* Get and add the displacement. */
13001 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13002 {
13003 case 0:
13004 break;
13005 case 1:
13006 {
13007 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13008 u32EffAddr += i8Disp;
13009 break;
13010 }
13011 case 2:
13012 {
13013 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13014 u32EffAddr += u32Disp;
13015 break;
13016 }
13017 default:
13018 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13019 }
13020
13021 }
13022 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13023 *pGCPtrEff = u32EffAddr;
13024 else
13025 {
13026 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13027 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13028 }
13029 }
13030 }
13031 else
13032 {
13033 uint64_t u64EffAddr;
13034
13035 /* Handle the rip+disp32 form with no registers first. */
13036 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13037 {
13038 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13039 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13040 }
13041 else
13042 {
13043 /* Get the register (or SIB) value. */
13044 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13045 {
13046 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13047 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13048 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13049 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13050 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13051 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13052 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13053 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13054 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13055 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13056 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13057 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13058 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13059 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13060 /* SIB */
13061 case 4:
13062 case 12:
13063 {
13064 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13065
13066 /* Get the index and scale it. */
13067 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13068 {
13069 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13070 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13071 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13072 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13073 case 4: u64EffAddr = 0; /*none */ break;
13074 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13075 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13076 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13077 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13078 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13079 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13080 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13081 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13082 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13083 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13084 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13085 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13086 }
13087 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13088
13089 /* add base */
13090 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13091 {
13092 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13093 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13094 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13095 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13096 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13097 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13098 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13099 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13100 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13101 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13102 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13103 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13104 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13105 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13106 /* complicated encodings */
13107 case 5:
13108 case 13:
13109 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13110 {
13111 if (!pVCpu->iem.s.uRexB)
13112 {
13113 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13114 SET_SS_DEF();
13115 }
13116 else
13117 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13118 }
13119 else
13120 {
13121 uint32_t u32Disp;
13122 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13123 u64EffAddr += (int32_t)u32Disp;
13124 }
13125 break;
13126 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13127 }
13128 break;
13129 }
13130 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13131 }
13132
13133 /* Get and add the displacement. */
13134 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13135 {
13136 case 0:
13137 break;
13138 case 1:
13139 {
13140 int8_t i8Disp;
13141 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13142 u64EffAddr += i8Disp;
13143 break;
13144 }
13145 case 2:
13146 {
13147 uint32_t u32Disp;
13148 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13149 u64EffAddr += (int32_t)u32Disp;
13150 break;
13151 }
13152 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13153 }
13154
13155 }
13156
13157 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13158 *pGCPtrEff = u64EffAddr;
13159 else
13160 {
13161 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13162 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13163 }
13164 }
13165
13166 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13167 return VINF_SUCCESS;
13168}
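/*
 * Side note (not part of the decoder proper): the SIB handling above relies on
 * the X86_SIB_* masks and shifts.  A minimal standalone sketch of the same
 * field extraction, assuming plain C and ignoring the REX.X/REX.B extensions
 * that the code above ORs in via uRexIndex/uRexB, would look like this:
 *
 * @code
 *  static void DecodeSibFields(uint8_t bSib, unsigned *puScale, unsigned *puIndex, unsigned *puBase)
 *  {
 *      *puScale = bSib >> 6;        // bits 7:6 - shift count, i.e. scale factor 1 << *puScale
 *      *puIndex = (bSib >> 3) & 7;  // bits 5:3 - index register (REX.X supplies bit 3)
 *      *puBase  = bSib & 7;         // bits 2:0 - base register  (REX.B supplies bit 3)
 *  }
 * @endcode
 */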
13169
13170
13171/**
13172 * Calculates the effective address of a ModR/M memory operand.
13173 *
13174 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13175 *
13176 * @return Strict VBox status code.
13177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13178 * @param bRm The ModRM byte.
13179 * @param cbImm The size of any immediate following the
13180 * effective address opcode bytes. Important for
13181 * RIP relative addressing.
13182 * @param pGCPtrEff Where to return the effective address.
13183 * @param offRsp RSP displacement.
13184 */
13185IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13186{
13187 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13188# define SET_SS_DEF() \
13189 do \
13190 { \
13191 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13192 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13193 } while (0)
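 /*
 * Note on SET_SS_DEF: base registers derived from (R|E)BP or (R|E)SP default to
 * the SS segment rather than DS, so e.g. a plain "mov eax, [ebp-4]" is an SS
 * access unless an explicit segment prefix (checked via IEM_OP_PRF_SEG_MASK
 * above) overrides it.  That default is all this helper macro captures.
 */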
13194
13195 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13196 {
13197/** @todo Check the effective address size crap! */
13198 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13199 {
13200 uint16_t u16EffAddr;
13201
13202 /* Handle the disp16 form with no registers first. */
13203 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13204 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13205 else
13206 {
13207 /* Get the displacement. */
13208 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13209 {
13210 case 0: u16EffAddr = 0; break;
13211 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13212 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13213 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13214 }
13215
13216 /* Add the base and index registers to the disp. */
13217 switch (bRm & X86_MODRM_RM_MASK)
13218 {
13219 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13220 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13221 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13222 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13223 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13224 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13225 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13226 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13227 }
13228 }
13229
13230 *pGCPtrEff = u16EffAddr;
13231 }
13232 else
13233 {
13234 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13235 uint32_t u32EffAddr;
13236
13237 /* Handle the disp32 form with no registers first. */
13238 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13239 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13240 else
13241 {
13242 /* Get the register (or SIB) value. */
13243 switch ((bRm & X86_MODRM_RM_MASK))
13244 {
13245 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13246 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13247 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13248 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13249 case 4: /* SIB */
13250 {
13251 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13252
13253 /* Get the index and scale it. */
13254 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13255 {
13256 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13257 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13258 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13259 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13260 case 4: u32EffAddr = 0; /*none */ break;
13261 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13262 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13263 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13264 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13265 }
13266 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13267
13268 /* add base */
13269 switch (bSib & X86_SIB_BASE_MASK)
13270 {
13271 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13272 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13273 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13274 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13275 case 4:
13276 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13277 SET_SS_DEF();
13278 break;
13279 case 5:
13280 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13281 {
13282 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13283 SET_SS_DEF();
13284 }
13285 else
13286 {
13287 uint32_t u32Disp;
13288 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13289 u32EffAddr += u32Disp;
13290 }
13291 break;
13292 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13293 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13294 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13295 }
13296 break;
13297 }
13298 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13299 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13300 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13301 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13302 }
13303
13304 /* Get and add the displacement. */
13305 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13306 {
13307 case 0:
13308 break;
13309 case 1:
13310 {
13311 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13312 u32EffAddr += i8Disp;
13313 break;
13314 }
13315 case 2:
13316 {
13317 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13318 u32EffAddr += u32Disp;
13319 break;
13320 }
13321 default:
13322 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13323 }
13324
13325 }
13326 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13327 *pGCPtrEff = u32EffAddr;
13328 else
13329 {
13330 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13331 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13332 }
13333 }
13334 }
13335 else
13336 {
13337 uint64_t u64EffAddr;
13338
13339 /* Handle the rip+disp32 form with no registers first. */
13340 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13341 {
13342 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13343 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13344 }
13345 else
13346 {
13347 /* Get the register (or SIB) value. */
13348 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13349 {
13350 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13351 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13352 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13353 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13354 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13355 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13356 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13357 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13358 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13359 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13360 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13361 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13362 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13363 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13364 /* SIB */
13365 case 4:
13366 case 12:
13367 {
13368 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13369
13370 /* Get the index and scale it. */
13371 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13372 {
13373 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13374 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13375 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13376 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13377 case 4: u64EffAddr = 0; /*none */ break;
13378 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13379 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13380 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13381 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13382 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13383 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13384 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13385 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13386 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13387 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13388 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13389 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13390 }
13391 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13392
13393 /* add base */
13394 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13395 {
13396 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13397 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13398 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13399 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13400 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13401 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13402 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13403 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13404 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13405 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13406 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13407 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13408 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13409 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13410 /* complicated encodings */
13411 case 5:
13412 case 13:
13413 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13414 {
13415 if (!pVCpu->iem.s.uRexB)
13416 {
13417 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13418 SET_SS_DEF();
13419 }
13420 else
13421 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13422 }
13423 else
13424 {
13425 uint32_t u32Disp;
13426 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13427 u64EffAddr += (int32_t)u32Disp;
13428 }
13429 break;
13430 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13431 }
13432 break;
13433 }
13434 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13435 }
13436
13437 /* Get and add the displacement. */
13438 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13439 {
13440 case 0:
13441 break;
13442 case 1:
13443 {
13444 int8_t i8Disp;
13445 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13446 u64EffAddr += i8Disp;
13447 break;
13448 }
13449 case 2:
13450 {
13451 uint32_t u32Disp;
13452 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13453 u64EffAddr += (int32_t)u32Disp;
13454 break;
13455 }
13456 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13457 }
13458
13459 }
13460
13461 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13462 *pGCPtrEff = u64EffAddr;
13463 else
13464 {
13465 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13466 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13467 }
13468 }
13469
13470 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13471 return VINF_SUCCESS;
13472}
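/*
 * Worked example for the rip+disp32 path above (informal, numbers invented for
 * illustration): for the 7 byte instruction "mov rax, [rip+0x1000]" at RIP
 * 0x401000 with no trailing immediate (cbImm = 0), the code computes
 *
 *      GCPtrEff = 0x1000 + 0x401000 + 7 + 0 = 0x402007
 *
 * i.e. the displacement is relative to the end of the instruction (the next
 * RIP) plus any immediate bytes following the ModR/M encoding, which is why
 * callers must pass cbImm accurately for RIP-relative operands.
 */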
13473
13474
13475#ifdef IEM_WITH_SETJMP
13476/**
13477 * Calculates the effective address of a ModR/M memory operand.
13478 *
13479 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13480 *
13481 * May longjmp on internal error.
13482 *
13483 * @return The effective address.
13484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13485 * @param bRm The ModRM byte.
13486 * @param cbImm The size of any immediate following the
13487 * effective address opcode bytes. Important for
13488 * RIP relative addressing.
13489 */
13490IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13491{
13492 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13493# define SET_SS_DEF() \
13494 do \
13495 { \
13496 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13497 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13498 } while (0)
13499
13500 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13501 {
13502/** @todo Check the effective address size crap! */
13503 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13504 {
13505 uint16_t u16EffAddr;
13506
13507 /* Handle the disp16 form with no registers first. */
13508 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13509 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13510 else
13511 {
13512 /* Get the displacement. */
13513 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13514 {
13515 case 0: u16EffAddr = 0; break;
13516 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13517 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13518 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13519 }
13520
13521 /* Add the base and index registers to the disp. */
13522 switch (bRm & X86_MODRM_RM_MASK)
13523 {
13524 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13525 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13526 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13527 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13528 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13529 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13530 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13531 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13532 }
13533 }
13534
13535 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13536 return u16EffAddr;
13537 }
13538
13539 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13540 uint32_t u32EffAddr;
13541
13542 /* Handle the disp32 form with no registers first. */
13543 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13544 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13545 else
13546 {
13547 /* Get the register (or SIB) value. */
13548 switch ((bRm & X86_MODRM_RM_MASK))
13549 {
13550 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13551 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13552 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13553 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13554 case 4: /* SIB */
13555 {
13556 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13557
13558 /* Get the index and scale it. */
13559 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13560 {
13561 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13562 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13563 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13564 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13565 case 4: u32EffAddr = 0; /*none */ break;
13566 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13567 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13568 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13569 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13570 }
13571 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13572
13573 /* add base */
13574 switch (bSib & X86_SIB_BASE_MASK)
13575 {
13576 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13577 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13578 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13579 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13580 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13581 case 5:
13582 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13583 {
13584 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13585 SET_SS_DEF();
13586 }
13587 else
13588 {
13589 uint32_t u32Disp;
13590 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13591 u32EffAddr += u32Disp;
13592 }
13593 break;
13594 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13595 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13596 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13597 }
13598 break;
13599 }
13600 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13601 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13602 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13603 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13604 }
13605
13606 /* Get and add the displacement. */
13607 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13608 {
13609 case 0:
13610 break;
13611 case 1:
13612 {
13613 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13614 u32EffAddr += i8Disp;
13615 break;
13616 }
13617 case 2:
13618 {
13619 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13620 u32EffAddr += u32Disp;
13621 break;
13622 }
13623 default:
13624 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13625 }
13626 }
13627
13628 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13629 {
13630 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13631 return u32EffAddr;
13632 }
13633 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13634 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13635 return u32EffAddr & UINT16_MAX;
13636 }
13637
13638 uint64_t u64EffAddr;
13639
13640 /* Handle the rip+disp32 form with no registers first. */
13641 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13642 {
13643 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13644 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13645 }
13646 else
13647 {
13648 /* Get the register (or SIB) value. */
13649 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13650 {
13651 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13652 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13653 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13654 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13655 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13656 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13657 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13658 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13659 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13660 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13661 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13662 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13663 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13664 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13665 /* SIB */
13666 case 4:
13667 case 12:
13668 {
13669 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13670
13671 /* Get the index and scale it. */
13672 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13673 {
13674 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13675 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13676 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13677 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13678 case 4: u64EffAddr = 0; /*none */ break;
13679 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13680 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13681 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13682 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13683 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13684 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13685 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13686 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13687 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13688 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13689 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13690 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13691 }
13692 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13693
13694 /* add base */
13695 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13696 {
13697 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13698 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13699 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13700 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13701 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13702 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13703 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13704 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13705 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13706 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13707 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13708 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13709 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13710 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13711 /* complicated encodings */
13712 case 5:
13713 case 13:
13714 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13715 {
13716 if (!pVCpu->iem.s.uRexB)
13717 {
13718 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13719 SET_SS_DEF();
13720 }
13721 else
13722 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13723 }
13724 else
13725 {
13726 uint32_t u32Disp;
13727 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13728 u64EffAddr += (int32_t)u32Disp;
13729 }
13730 break;
13731 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13732 }
13733 break;
13734 }
13735 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13736 }
13737
13738 /* Get and add the displacement. */
13739 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13740 {
13741 case 0:
13742 break;
13743 case 1:
13744 {
13745 int8_t i8Disp;
13746 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13747 u64EffAddr += i8Disp;
13748 break;
13749 }
13750 case 2:
13751 {
13752 uint32_t u32Disp;
13753 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13754 u64EffAddr += (int32_t)u32Disp;
13755 break;
13756 }
13757 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13758 }
13759
13760 }
13761
13762 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13763 {
13764 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13765 return u64EffAddr;
13766 }
13767 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13768 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13769 return u64EffAddr & UINT32_MAX;
13770}
13771#endif /* IEM_WITH_SETJMP */
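/*
 * Implementation note: the helpers above differ mainly in their error
 * handling.  The strict-status variants return a VBOXSTRICTRC and use
 * IEM_NOT_REACHED_DEFAULT_CASE_RET for impossible encodings, while the
 * IEM_WITH_SETJMP variant returns the address directly and reports internal
 * errors either as RTGCPTR_MAX or by longjmp'ing to the buffer set up by its
 * callers (see the setjmp(JmpBuf) pattern in iemExecOneInner further down).
 */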
13772
13773/** @} */
13774
13775
13776
13777/*
13778 * Include the instructions
13779 */
13780#include "IEMAllInstructions.cpp.h"
13781
13782
13783
13784#ifdef LOG_ENABLED
13785/**
13786 * Logs the current instruction.
13787 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13788 * @param fSameCtx Set if we have the same context information as the VMM,
13789 * clear if we may have already executed an instruction in
13790 * our debug context. When clear, we assume IEMCPU holds
13791 * valid CPU mode info.
13792 *
13793 * The @a fSameCtx parameter is now misleading and obsolete.
13794 * @param pszFunction The IEM function doing the execution.
13795 */
13796IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13797{
13798# ifdef IN_RING3
13799 if (LogIs2Enabled())
13800 {
13801 char szInstr[256];
13802 uint32_t cbInstr = 0;
13803 if (fSameCtx)
13804 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13805 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13806 szInstr, sizeof(szInstr), &cbInstr);
13807 else
13808 {
13809 uint32_t fFlags = 0;
13810 switch (pVCpu->iem.s.enmCpuMode)
13811 {
13812 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13813 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13814 case IEMMODE_16BIT:
13815 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13816 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13817 else
13818 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13819 break;
13820 }
13821 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13822 szInstr, sizeof(szInstr), &cbInstr);
13823 }
13824
13825 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13826 Log2(("**** %s\n"
13827 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13828 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13829 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13830 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13831 " %s\n"
13832 , pszFunction,
13833 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13834 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13835 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13836 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13837 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13838 szInstr));
13839
13840 if (LogIs3Enabled())
13841 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13842 }
13843 else
13844# endif
13845 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13846 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13847 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13848}
13849#endif /* LOG_ENABLED */
13850
13851
13852/**
13853 * Makes status code adjustments (pass up from I/O and access handler)
13854 * as well as maintaining statistics.
13855 *
13856 * @returns Strict VBox status code to pass up.
13857 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13858 * @param rcStrict The status from executing an instruction.
13859 */
13860DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13861{
13862 if (rcStrict != VINF_SUCCESS)
13863 {
13864 if (RT_SUCCESS(rcStrict))
13865 {
13866 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13867 || rcStrict == VINF_IOM_R3_IOPORT_READ
13868 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13869 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13870 || rcStrict == VINF_IOM_R3_MMIO_READ
13871 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13872 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13873 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13874 || rcStrict == VINF_CPUM_R3_MSR_READ
13875 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13876 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13877 || rcStrict == VINF_EM_RAW_TO_R3
13878 || rcStrict == VINF_EM_TRIPLE_FAULT
13879 || rcStrict == VINF_GIM_R3_HYPERCALL
13880 /* raw-mode / virt handlers only: */
13881 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13882 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13883 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13884 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13885 || rcStrict == VINF_SELM_SYNC_GDT
13886 || rcStrict == VINF_CSAM_PENDING_ACTION
13887 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13888 /* nested hw.virt codes: */
13889 || rcStrict == VINF_SVM_VMEXIT
13890 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13891/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR */
13892 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13893#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13894 if ( rcStrict == VINF_SVM_VMEXIT
13895 && rcPassUp == VINF_SUCCESS)
13896 rcStrict = VINF_SUCCESS;
13897 else
13898#endif
13899 if (rcPassUp == VINF_SUCCESS)
13900 pVCpu->iem.s.cRetInfStatuses++;
13901 else if ( rcPassUp < VINF_EM_FIRST
13902 || rcPassUp > VINF_EM_LAST
13903 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13904 {
13905 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13906 pVCpu->iem.s.cRetPassUpStatus++;
13907 rcStrict = rcPassUp;
13908 }
13909 else
13910 {
13911 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13912 pVCpu->iem.s.cRetInfStatuses++;
13913 }
13914 }
13915 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13916 pVCpu->iem.s.cRetAspectNotImplemented++;
13917 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13918 pVCpu->iem.s.cRetInstrNotImplemented++;
13919 else
13920 pVCpu->iem.s.cRetErrStatuses++;
13921 }
13922 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13923 {
13924 pVCpu->iem.s.cRetPassUpStatus++;
13925 rcStrict = pVCpu->iem.s.rcPassUp;
13926 }
13927
13928 return rcStrict;
13929}
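/*
 * Informal summary of the pass-up decision above (derived from the code, not a
 * spec) for the case where rcStrict is an informational status:
 *
 *      rcPassUp == VINF_SUCCESS                        -> keep rcStrict
 *      rcPassUp outside [VINF_EM_FIRST..VINF_EM_LAST]  -> return rcPassUp
 *      rcPassUp < rcStrict (higher priority EM status) -> return rcPassUp
 *      otherwise                                       -> keep rcStrict
 *
 * The cRetPassUpStatus / cRetInfStatuses counters record which branch was hit.
 */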
13930
13931
13932/**
13933 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13934 * IEMExecOneWithPrefetchedByPC.
13935 *
13936 * Similar code is found in IEMExecLots.
13937 *
13938 * @return Strict VBox status code.
13939 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13940 * @param fExecuteInhibit If set, execute the instruction following CLI,
13941 * POP SS and MOV SS,GR.
13942 * @param pszFunction The calling function name.
13943 */
13944DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
13945{
13946 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13947 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13948 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13949 RT_NOREF_PV(pszFunction);
13950
13951#ifdef IEM_WITH_SETJMP
13952 VBOXSTRICTRC rcStrict;
13953 jmp_buf JmpBuf;
13954 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13955 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13956 if ((rcStrict = setjmp(JmpBuf)) == 0)
13957 {
13958 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13959 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13960 }
13961 else
13962 pVCpu->iem.s.cLongJumps++;
13963 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13964#else
13965 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13966 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13967#endif
13968 if (rcStrict == VINF_SUCCESS)
13969 pVCpu->iem.s.cInstructions++;
13970 if (pVCpu->iem.s.cActiveMappings > 0)
13971 {
13972 Assert(rcStrict != VINF_SUCCESS);
13973 iemMemRollback(pVCpu);
13974 }
13975 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13976 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13977 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13978
13979//#ifdef DEBUG
13980// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
13981//#endif
13982
13983 /* Execute the next instruction as well if a cli, pop ss or
13984 mov ss, Gr has just completed successfully. */
13985 if ( fExecuteInhibit
13986 && rcStrict == VINF_SUCCESS
13987 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13988 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
13989 {
13990 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
13991 if (rcStrict == VINF_SUCCESS)
13992 {
13993#ifdef LOG_ENABLED
13994 iemLogCurInstr(pVCpu, false, pszFunction);
13995#endif
13996#ifdef IEM_WITH_SETJMP
13997 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13998 if ((rcStrict = setjmp(JmpBuf)) == 0)
13999 {
14000 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14001 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14002 }
14003 else
14004 pVCpu->iem.s.cLongJumps++;
14005 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14006#else
14007 IEM_OPCODE_GET_NEXT_U8(&b);
14008 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14009#endif
14010 if (rcStrict == VINF_SUCCESS)
14011 pVCpu->iem.s.cInstructions++;
14012 if (pVCpu->iem.s.cActiveMappings > 0)
14013 {
14014 Assert(rcStrict != VINF_SUCCESS);
14015 iemMemRollback(pVCpu);
14016 }
14017 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14018 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14019 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14020 }
14021 else if (pVCpu->iem.s.cActiveMappings > 0)
14022 iemMemRollback(pVCpu);
14023 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14024 }
14025
14026 /*
14027 * Return value fiddling, statistics and sanity assertions.
14028 */
14029 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14030
14031 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14032 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14033 return rcStrict;
14034}
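/*
 * Background on fExecuteInhibit (informal): POP SS and MOV SS,reg (and the
 * other inhibitors noted above) suppress interrupts for exactly one following
 * instruction, so that a sequence such as
 *
 *      mov ss, ax
 *      mov esp, stack_top      ; must not be separated from the SS load
 *
 * cannot be split by an interrupt between the two loads.  Executing the
 * shadowed instruction here and then resetting the inhibit PC keeps IEM's
 * behaviour consistent with that hardware rule.
 */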
14035
14036
14037#ifdef IN_RC
14038/**
14039 * Re-enters raw-mode or ensures we return to ring-3.
14040 *
14041 * @returns rcStrict, maybe modified.
14042 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14043 * @param rcStrict The status code returned by the interpreter.
14044 */
14045DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14046{
14047 if ( !pVCpu->iem.s.fInPatchCode
14048 && ( rcStrict == VINF_SUCCESS
14049 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14050 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14051 {
14052 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14053 CPUMRawEnter(pVCpu);
14054 else
14055 {
14056 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14057 rcStrict = VINF_EM_RESCHEDULE;
14058 }
14059 }
14060 return rcStrict;
14061}
14062#endif
14063
14064
14065/**
14066 * Execute one instruction.
14067 *
14068 * @return Strict VBox status code.
14069 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14070 */
14071VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14072{
14073#ifdef LOG_ENABLED
14074 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14075#endif
14076
14077 /*
14078 * Do the decoding and emulation.
14079 */
14080 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14081 if (rcStrict == VINF_SUCCESS)
14082 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14083 else if (pVCpu->iem.s.cActiveMappings > 0)
14084 iemMemRollback(pVCpu);
14085
14086#ifdef IN_RC
14087 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14088#endif
14089 if (rcStrict != VINF_SUCCESS)
14090 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14091 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14092 return rcStrict;
14093}
14094
14095
14096VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14097{
14098 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14099
14100 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14101 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14102 if (rcStrict == VINF_SUCCESS)
14103 {
14104 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14105 if (pcbWritten)
14106 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14107 }
14108 else if (pVCpu->iem.s.cActiveMappings > 0)
14109 iemMemRollback(pVCpu);
14110
14111#ifdef IN_RC
14112 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14113#endif
14114 return rcStrict;
14115}
14116
14117
14118VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14119 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14120{
14121 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14122
14123 VBOXSTRICTRC rcStrict;
14124 if ( cbOpcodeBytes
14125 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14126 {
14127 iemInitDecoder(pVCpu, false);
14128#ifdef IEM_WITH_CODE_TLB
14129 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14130 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14131 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14132 pVCpu->iem.s.offCurInstrStart = 0;
14133 pVCpu->iem.s.offInstrNextByte = 0;
14134#else
14135 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14136 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14137#endif
14138 rcStrict = VINF_SUCCESS;
14139 }
14140 else
14141 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14142 if (rcStrict == VINF_SUCCESS)
14143 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14144 else if (pVCpu->iem.s.cActiveMappings > 0)
14145 iemMemRollback(pVCpu);
14146
14147#ifdef IN_RC
14148 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14149#endif
14150 return rcStrict;
14151}
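/*
 * Hypothetical usage sketch (caller-side names invented for illustration): an
 * exit handler that already has the instruction bytes, e.g. from exit
 * information, can seed the decoder and skip the guest memory read:
 *
 * @code
 *  uint8_t abInstr[15];
 *  // ... abInstr filled with cbInstr valid bytes for the instruction at pCtx->rip ...
 *  VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(pCtx),
 *                                                       pCtx->rip, abInstr, cbInstr);
 * @endcode
 *
 * If the supplied PC does not match the current RIP the function simply falls
 * back to the normal prefetch path, as the code above shows.
 */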
14152
14153
14154VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14155{
14156 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14157
14158 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14159 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14160 if (rcStrict == VINF_SUCCESS)
14161 {
14162 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14163 if (pcbWritten)
14164 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14165 }
14166 else if (pVCpu->iem.s.cActiveMappings > 0)
14167 iemMemRollback(pVCpu);
14168
14169#ifdef IN_RC
14170 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14171#endif
14172 return rcStrict;
14173}
14174
14175
14176VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14177 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14178{
14179 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14180
14181 VBOXSTRICTRC rcStrict;
14182 if ( cbOpcodeBytes
14183 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14184 {
14185 iemInitDecoder(pVCpu, true);
14186#ifdef IEM_WITH_CODE_TLB
14187 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14188 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14189 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14190 pVCpu->iem.s.offCurInstrStart = 0;
14191 pVCpu->iem.s.offInstrNextByte = 0;
14192#else
14193 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14194 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14195#endif
14196 rcStrict = VINF_SUCCESS;
14197 }
14198 else
14199 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14200 if (rcStrict == VINF_SUCCESS)
14201 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14202 else if (pVCpu->iem.s.cActiveMappings > 0)
14203 iemMemRollback(pVCpu);
14204
14205#ifdef IN_RC
14206 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14207#endif
14208 return rcStrict;
14209}
14210
14211
14212/**
14213 * For debugging DISGetParamSize, may come in handy.
14214 *
14215 * @returns Strict VBox status code.
14216 * @param pVCpu The cross context virtual CPU structure of the
14217 * calling EMT.
14218 * @param pCtxCore The context core structure.
14219 * @param OpcodeBytesPC The PC of the opcode bytes.
14220 * @param pvOpcodeBytes Prefetched opcode bytes.
14221 * @param cbOpcodeBytes Number of prefetched bytes.
14222 * @param pcbWritten Where to return the number of bytes written.
14223 * Optional.
14224 */
14225VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14226 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14227 uint32_t *pcbWritten)
14228{
14229 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14230
14231 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14232 VBOXSTRICTRC rcStrict;
14233 if ( cbOpcodeBytes
14234 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14235 {
14236 iemInitDecoder(pVCpu, true);
14237#ifdef IEM_WITH_CODE_TLB
14238 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14239 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14240 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14241 pVCpu->iem.s.offCurInstrStart = 0;
14242 pVCpu->iem.s.offInstrNextByte = 0;
14243#else
14244 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14245 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14246#endif
14247 rcStrict = VINF_SUCCESS;
14248 }
14249 else
14250 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14251 if (rcStrict == VINF_SUCCESS)
14252 {
14253 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14254 if (pcbWritten)
14255 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14256 }
14257 else if (pVCpu->iem.s.cActiveMappings > 0)
14258 iemMemRollback(pVCpu);
14259
14260#ifdef IN_RC
14261 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14262#endif
14263 return rcStrict;
14264}
14265
14266
14267VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14268{
14269 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14270
14271 /*
14272 * See if there is an interrupt pending in TRPM, inject it if we can.
14273 */
14274 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14275#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14276 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif;
14277 if (fIntrEnabled)
14278 {
14279 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14280 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu));
14281 else
14282 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14283 }
14284#else
14285 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14286#endif
14287 if ( fIntrEnabled
14288 && TRPMHasTrap(pVCpu)
14289 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14290 {
14291 uint8_t u8TrapNo;
14292 TRPMEVENT enmType;
14293 RTGCUINT uErrCode;
14294 RTGCPTR uCr2;
14295 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14296 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14297 TRPMResetTrap(pVCpu);
14298 }
14299
14300 /*
14301 * Initial decoder init w/ prefetch, then setup setjmp.
14302 */
14303 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14304 if (rcStrict == VINF_SUCCESS)
14305 {
14306#ifdef IEM_WITH_SETJMP
14307 jmp_buf JmpBuf;
14308 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14309 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14310 pVCpu->iem.s.cActiveMappings = 0;
14311 if ((rcStrict = setjmp(JmpBuf)) == 0)
14312#endif
14313 {
14314 /*
14315 * The run loop. We limit ourselves to 4096 instructions right now.
14316 */
14317 PVM pVM = pVCpu->CTX_SUFF(pVM);
14318 uint32_t cInstr = 4096;
14319 for (;;)
14320 {
14321 /*
14322 * Log the state.
14323 */
14324#ifdef LOG_ENABLED
14325 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14326#endif
14327
14328 /*
14329 * Do the decoding and emulation.
14330 */
14331 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14332 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14333 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14334 {
14335 Assert(pVCpu->iem.s.cActiveMappings == 0);
14336 pVCpu->iem.s.cInstructions++;
14337 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14338 {
14339 uint32_t fCpu = pVCpu->fLocalForcedActions
14340 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14341 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14342 | VMCPU_FF_TLB_FLUSH
14343#ifdef VBOX_WITH_RAW_MODE
14344 | VMCPU_FF_TRPM_SYNC_IDT
14345 | VMCPU_FF_SELM_SYNC_TSS
14346 | VMCPU_FF_SELM_SYNC_GDT
14347 | VMCPU_FF_SELM_SYNC_LDT
14348#endif
14349 | VMCPU_FF_INHIBIT_INTERRUPTS
14350 | VMCPU_FF_BLOCK_NMIS
14351 | VMCPU_FF_UNHALT ));
14352
14353 if (RT_LIKELY( ( !fCpu
14354 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14355 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14356 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) ))
14357 {
14358 if (cInstr-- > 0)
14359 {
14360 Assert(pVCpu->iem.s.cActiveMappings == 0);
14361 iemReInitDecoder(pVCpu);
14362 continue;
14363 }
14364 }
14365 }
14366 Assert(pVCpu->iem.s.cActiveMappings == 0);
14367 }
14368 else if (pVCpu->iem.s.cActiveMappings > 0)
14369 iemMemRollback(pVCpu);
14370 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14371 break;
14372 }
14373 }
14374#ifdef IEM_WITH_SETJMP
14375 else
14376 {
14377 if (pVCpu->iem.s.cActiveMappings > 0)
14378 iemMemRollback(pVCpu);
14379 pVCpu->iem.s.cLongJumps++;
14380 }
14381 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14382#endif
14383
14384 /*
14385 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14386 */
14387 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14388 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14389 }
14390 else
14391 {
14392 if (pVCpu->iem.s.cActiveMappings > 0)
14393 iemMemRollback(pVCpu);
14394
14395#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14396 /*
14397 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14398 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14399 */
14400 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14401#endif
14402 }
14403
14404 /*
14405 * Maybe re-enter raw-mode and log.
14406 */
14407#ifdef IN_RC
14408 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14409#endif
14410 if (rcStrict != VINF_SUCCESS)
14411 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14412 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14413 if (pcInstructions)
14414 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14415 return rcStrict;
14416}
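/*
 * Minimal usage sketch (assuming a caller that already owns the EMT context):
 *
 * @code
 *  uint32_t     cInstructions = 0;
 *  VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
 *  // cInstructions now holds the number of instructions retired in this call.
 * @endcode
 */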
14417
14418
14419/**
14420 * Interface used by EMExecuteExec, does exit statistics and limits.
14421 *
14422 * @returns Strict VBox status code.
14423 * @param pVCpu The cross context virtual CPU structure.
14424 * @param fWillExit To be defined.
14425 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14426 * @param cMaxInstructions Maximum number of instructions to execute.
14427 * @param cMaxInstructionsWithoutExits
14428 * The max number of instructions without exits.
14429 * @param pStats Where to return statistics.
14430 */
14431VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14432 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14433{
14434 NOREF(fWillExit); /** @todo define flexible exit crits */
14435
14436 /*
14437 * Initialize return stats.
14438 */
14439 pStats->cInstructions = 0;
14440 pStats->cExits = 0;
14441 pStats->cMaxExitDistance = 0;
14442 pStats->cReserved = 0;
14443
14444 /*
14445 * Initial decoder init w/ prefetch, then setup setjmp.
14446 */
14447 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14448 if (rcStrict == VINF_SUCCESS)
14449 {
14450#ifdef IEM_WITH_SETJMP
14451 jmp_buf JmpBuf;
14452 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14453 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14454 pVCpu->iem.s.cActiveMappings = 0;
14455 if ((rcStrict = setjmp(JmpBuf)) == 0)
14456#endif
14457 {
14458#ifdef IN_RING0
14459 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14460#endif
14461 uint32_t cInstructionSinceLastExit = 0;
14462
14463 /*
14464 * The run loop. The instruction limits are supplied by the caller (see cMaxInstructions).
14465 */
14466 PVM pVM = pVCpu->CTX_SUFF(pVM);
14467 for (;;)
14468 {
14469 /*
14470 * Log the state.
14471 */
14472#ifdef LOG_ENABLED
14473 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14474#endif
14475
14476 /*
14477 * Do the decoding and emulation.
14478 */
14479 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14480
14481 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14482 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14483
14484 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14485 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14486 {
14487 pStats->cExits += 1;
14488 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14489 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14490 cInstructionSinceLastExit = 0;
14491 }
14492
14493 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14494 {
14495 Assert(pVCpu->iem.s.cActiveMappings == 0);
14496 pVCpu->iem.s.cInstructions++;
14497 pStats->cInstructions++;
14498 cInstructionSinceLastExit++;
14499 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14500 {
14501 uint32_t fCpu = pVCpu->fLocalForcedActions
14502 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14503 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14504 | VMCPU_FF_TLB_FLUSH
14505#ifdef VBOX_WITH_RAW_MODE
14506 | VMCPU_FF_TRPM_SYNC_IDT
14507 | VMCPU_FF_SELM_SYNC_TSS
14508 | VMCPU_FF_SELM_SYNC_GDT
14509 | VMCPU_FF_SELM_SYNC_LDT
14510#endif
14511 | VMCPU_FF_INHIBIT_INTERRUPTS
14512 | VMCPU_FF_BLOCK_NMIS
14513 | VMCPU_FF_UNHALT ));
14514
14515 if (RT_LIKELY( ( ( !fCpu
14516 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14517 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14518 && !VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK) )
14519 || pStats->cInstructions < cMinInstructions))
14520 {
14521 if (pStats->cInstructions < cMaxInstructions)
14522 {
14523 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14524 {
14525#ifdef IN_RING0
14526 if ( !fCheckPreemptionPending
14527 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14528#endif
14529 {
14530 Assert(pVCpu->iem.s.cActiveMappings == 0);
14531 iemReInitDecoder(pVCpu);
14532 continue;
14533 }
14534#ifdef IN_RING0
14535 rcStrict = VINF_EM_RAW_INTERRUPT;
14536 break;
14537#endif
14538 }
14539 }
14540 }
14541 Assert(!(fCpu & VMCPU_FF_IEM));
14542 }
14543 Assert(pVCpu->iem.s.cActiveMappings == 0);
14544 }
14545 else if (pVCpu->iem.s.cActiveMappings > 0)
14546 iemMemRollback(pVCpu);
14547 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14548 break;
14549 }
14550 }
14551#ifdef IEM_WITH_SETJMP
14552 else
14553 {
14554 if (pVCpu->iem.s.cActiveMappings > 0)
14555 iemMemRollback(pVCpu);
14556 pVCpu->iem.s.cLongJumps++;
14557 }
14558 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14559#endif
14560
14561 /*
14562 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14563 */
14564 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14565 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14566 }
14567 else
14568 {
14569 if (pVCpu->iem.s.cActiveMappings > 0)
14570 iemMemRollback(pVCpu);
14571
14572#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14573 /*
14574 * When a nested-guest causes an exception intercept (e.g. #PF) while fetching
14575 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14576 */
14577 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14578#endif
14579 }
14580
14581 /*
14582 * Maybe re-enter raw-mode and log.
14583 */
14584#ifdef IN_RC
14585 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14586#endif
14587 if (rcStrict != VINF_SUCCESS)
14588 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14589 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14590 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14591 return rcStrict;
14592}
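/*
 * Minimal usage sketch (parameter values are illustrative, not tuned):
 *
 * @code
 *  IEMEXECFOREXITSTATS Stats;
 *  VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0 /*fWillExit*/,
 *                                          1 /*cMinInstructions*/,
 *                                          4096 /*cMaxInstructions*/,
 *                                          32 /*cMaxInstructionsWithoutExits*/,
 *                                          &Stats);
 *  // Stats.cInstructions, Stats.cExits and Stats.cMaxExitDistance describe the run.
 * @endcode
 */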
14593
14594
14595/**
14596 * Injects a trap, fault, abort, software interrupt or external interrupt.
14597 *
14598 * The parameter list matches TRPMQueryTrapAll pretty closely.
14599 *
14600 * @returns Strict VBox status code.
14601 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14602 * @param u8TrapNo The trap number.
14603 * @param enmType What type is it (trap/fault/abort), software
14604 * interrupt or hardware interrupt.
14605 * @param uErrCode The error code if applicable.
14606 * @param uCr2 The CR2 value if applicable.
14607 * @param cbInstr The instruction length (only relevant for
14608 * software interrupts).
14609 */
14610VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14611 uint8_t cbInstr)
14612{
14613 iemInitDecoder(pVCpu, false);
14614#ifdef DBGFTRACE_ENABLED
14615 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14616 u8TrapNo, enmType, uErrCode, uCr2);
14617#endif
14618
14619 uint32_t fFlags;
14620 switch (enmType)
14621 {
14622 case TRPM_HARDWARE_INT:
14623 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14624 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14625 uErrCode = uCr2 = 0;
14626 break;
14627
14628 case TRPM_SOFTWARE_INT:
14629 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14630 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14631 uErrCode = uCr2 = 0;
14632 break;
14633
14634 case TRPM_TRAP:
14635 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14636 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14637 if (u8TrapNo == X86_XCPT_PF)
14638 fFlags |= IEM_XCPT_FLAGS_CR2;
14639 switch (u8TrapNo)
14640 {
14641 case X86_XCPT_DF:
14642 case X86_XCPT_TS:
14643 case X86_XCPT_NP:
14644 case X86_XCPT_SS:
14645 case X86_XCPT_PF:
14646 case X86_XCPT_AC:
14647 fFlags |= IEM_XCPT_FLAGS_ERR;
14648 break;
14649
14650 case X86_XCPT_NMI:
14651 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14652 break;
14653 }
14654 break;
14655
14656 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14657 }
14658
14659 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14660
14661 if (pVCpu->iem.s.cActiveMappings > 0)
14662 iemMemRollback(pVCpu);
14663
14664 return rcStrict;
14665}
14666
14667
14668/**
14669 * Injects the active TRPM event.
14670 *
14671 * @returns Strict VBox status code.
14672 * @param pVCpu The cross context virtual CPU structure.
14673 */
14674VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14675{
14676#ifndef IEM_IMPLEMENTS_TASKSWITCH
14677 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14678#else
14679 uint8_t u8TrapNo;
14680 TRPMEVENT enmType;
14681 RTGCUINT uErrCode;
14682 RTGCUINTPTR uCr2;
14683 uint8_t cbInstr;
14684 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14685 if (RT_FAILURE(rc))
14686 return rc;
14687
14688 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14689# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14690 if (rcStrict == VINF_SVM_VMEXIT)
14691 rcStrict = VINF_SUCCESS;
14692# endif
14693
14694 /** @todo Are there any other codes that imply the event was successfully
14695 * delivered to the guest? See @bugref{6607}. */
14696 if ( rcStrict == VINF_SUCCESS
14697 || rcStrict == VINF_IEM_RAISED_XCPT)
14698 TRPMResetTrap(pVCpu);
14699
14700 return rcStrict;
14701#endif
14702}
14703
14704
14705VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14706{
14707 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14708 return VERR_NOT_IMPLEMENTED;
14709}
14710
14711
14712VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14713{
14714 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14715 return VERR_NOT_IMPLEMENTED;
14716}
14717
14718
14719#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14720/**
14721 * Executes an IRET instruction with the default operand size.
14722 *
14723 * This is for PATM.
14724 *
14725 * @returns VBox status code.
14726 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14727 * @param pCtxCore The register frame.
14728 */
14729VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14730{
14731 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14732
14733 iemCtxCoreToCtx(pCtx, pCtxCore);
14734 iemInitDecoder(pVCpu);
14735 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14736 if (rcStrict == VINF_SUCCESS)
14737 iemCtxToCtxCore(pCtxCore, pCtx);
14738 else
14739 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14740 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14741 return rcStrict;
14742}
14743#endif
14744
14745
14746/**
14747 * Macro used by the IEMExec* method to check the given instruction length.
14748 *
14749 * Will return on failure!
14750 *
14751 * @param a_cbInstr The given instruction length.
14752 * @param a_cbMin The minimum length.
14753 */
14754#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14755 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14756 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14757
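/*
 * A worked example of the single-comparison range check above: with unsigned
 * arithmetic, (cbInstr - cbMin) <= (15 - cbMin) holds exactly when
 * cbMin <= cbInstr <= 15, because a too-small cbInstr wraps around to a huge
 * value.  For cbMin=2: cbInstr=1 gives 0xffffffff > 13 (rejected), cbInstr=2
 * gives 0 <= 13 (accepted), cbInstr=15 gives 13 <= 13 (accepted), and
 * cbInstr=16 gives 14 > 13 (rejected).
 */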
14758
14759/**
14760 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14761 *
14762 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14763 *
14764 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14766 * @param rcStrict The status code to fiddle.
14767 */
14768DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14769{
14770 iemUninitExec(pVCpu);
14771#ifdef IN_RC
14772 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14773#else
14774 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14775#endif
14776}
14777
14778
14779/**
14780 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14781 *
14782 * This API ASSUMES that the caller has already verified that the guest code is
14783 * allowed to access the I/O port. (The I/O port is in the DX register in the
14784 * guest state.)
14785 *
14786 * @returns Strict VBox status code.
14787 * @param pVCpu The cross context virtual CPU structure.
14788 * @param cbValue The size of the I/O port access (1, 2, or 4).
14789 * @param enmAddrMode The addressing mode.
14790 * @param fRepPrefix Indicates whether a repeat prefix is used
14791 * (doesn't matter which for this instruction).
14792 * @param cbInstr The instruction length in bytes.
14793 * @param   iEffSeg     The effective segment register number.
14794 * @param fIoChecked Whether the access to the I/O port has been
14795 * checked or not. It's typically checked in the
14796 * HM scenario.
14797 */
14798VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14799 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14800{
14801 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14802 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14803
14804 /*
14805 * State init.
14806 */
14807 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14808
14809 /*
14810 * Switch orgy for getting to the right handler.
14811 */
14812 VBOXSTRICTRC rcStrict;
14813 if (fRepPrefix)
14814 {
14815 switch (enmAddrMode)
14816 {
14817 case IEMMODE_16BIT:
14818 switch (cbValue)
14819 {
14820 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14821 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14822 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14823 default:
14824 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14825 }
14826 break;
14827
14828 case IEMMODE_32BIT:
14829 switch (cbValue)
14830 {
14831 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14832 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14833 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14834 default:
14835 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14836 }
14837 break;
14838
14839 case IEMMODE_64BIT:
14840 switch (cbValue)
14841 {
14842 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14843 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14844 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14845 default:
14846 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14847 }
14848 break;
14849
14850 default:
14851 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14852 }
14853 }
14854 else
14855 {
14856 switch (enmAddrMode)
14857 {
14858 case IEMMODE_16BIT:
14859 switch (cbValue)
14860 {
14861 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14862 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14863 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14864 default:
14865 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14866 }
14867 break;
14868
14869 case IEMMODE_32BIT:
14870 switch (cbValue)
14871 {
14872 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14873 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14874 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14875 default:
14876 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14877 }
14878 break;
14879
14880 case IEMMODE_64BIT:
14881 switch (cbValue)
14882 {
14883 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14884 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14885 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14886 default:
14887 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14888 }
14889 break;
14890
14891 default:
14892 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14893 }
14894 }
14895
14896 if (pVCpu->iem.s.cActiveMappings)
14897 iemMemRollback(pVCpu);
14898
14899 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14900}
14901
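/*
 * Caller sketch (illustrative): forwarding a trapped "rep outsb" to the API
 * above.  In practice the address mode, segment and instruction length are
 * decoded from the VM-exit information; the literals below are placeholders
 * and the helper name is an assumption, only the IEMExecStringIoWrite()
 * signature comes from this file.
 */
#if 0
static VBOXSTRICTRC hmSketchHandleOutsExit(PVMCPU pVCpu)
{
    return IEMExecStringIoWrite(pVCpu,
                                1 /*cbValue: byte-sized OUTSB*/,
                                IEMMODE_32BIT /*enmAddrMode*/,
                                true /*fRepPrefix*/,
                                2 /*cbInstr: REP prefix + 0x6E opcode*/,
                                X86_SREG_DS /*iEffSeg: default segment, no override*/,
                                true /*fIoChecked: the I/O port access was already validated*/);
}
#endif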
14902
14903/**
14904 * Interface for HM and EM for executing string I/O IN (read) instructions.
14905 *
14906 * This API ASSUMES that the caller has already verified that the guest code is
14907 * allowed to access the I/O port. (The I/O port is in the DX register in the
14908 * guest state.)
14909 *
14910 * @returns Strict VBox status code.
14911 * @param pVCpu The cross context virtual CPU structure.
14912 * @param cbValue The size of the I/O port access (1, 2, or 4).
14913 * @param enmAddrMode The addressing mode.
14914 * @param fRepPrefix Indicates whether a repeat prefix is used
14915 * (doesn't matter which for this instruction).
14916 * @param cbInstr The instruction length in bytes.
14917 * @param fIoChecked Whether the access to the I/O port has been
14918 * checked or not. It's typically checked in the
14919 * HM scenario.
14920 */
14921VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14922 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14923{
14924 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14925
14926 /*
14927 * State init.
14928 */
14929 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14930
14931 /*
14932 * Switch orgy for getting to the right handler.
14933 */
14934 VBOXSTRICTRC rcStrict;
14935 if (fRepPrefix)
14936 {
14937 switch (enmAddrMode)
14938 {
14939 case IEMMODE_16BIT:
14940 switch (cbValue)
14941 {
14942 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14943 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14944 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14945 default:
14946 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14947 }
14948 break;
14949
14950 case IEMMODE_32BIT:
14951 switch (cbValue)
14952 {
14953 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14954 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14955 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14956 default:
14957 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14958 }
14959 break;
14960
14961 case IEMMODE_64BIT:
14962 switch (cbValue)
14963 {
14964 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
14965 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
14966 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
14967 default:
14968 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14969 }
14970 break;
14971
14972 default:
14973 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14974 }
14975 }
14976 else
14977 {
14978 switch (enmAddrMode)
14979 {
14980 case IEMMODE_16BIT:
14981 switch (cbValue)
14982 {
14983 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14984 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14985 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14986 default:
14987 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14988 }
14989 break;
14990
14991 case IEMMODE_32BIT:
14992 switch (cbValue)
14993 {
14994 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14995 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14996 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14997 default:
14998 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14999 }
15000 break;
15001
15002 case IEMMODE_64BIT:
15003 switch (cbValue)
15004 {
15005 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15006 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15007 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15008 default:
15009 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15010 }
15011 break;
15012
15013 default:
15014 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15015 }
15016 }
15017
15018 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15019 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15020}
15021
15022
15023/**
15024 * Interface for raw-mode to execute an OUT instruction.
15025 *
15026 * @returns Strict VBox status code.
15027 * @param pVCpu The cross context virtual CPU structure.
15028 * @param cbInstr The instruction length in bytes.
15029 * @param   u16Port     The port to write to.
15030 * @param fImm Whether the port is specified using an immediate operand or
15031 * using the implicit DX register.
15032 * @param cbReg The register size.
15033 *
15034 * @remarks In ring-0 not all of the state needs to be synced in.
15035 */
15036VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15037{
15038 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15039 Assert(cbReg <= 4 && cbReg != 3);
15040
15041 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15042 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15043 Assert(!pVCpu->iem.s.cActiveMappings);
15044 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15045}
15046
15047
15048/**
15049 * Interface for raw-mode to execute an IN instruction.
15050 *
15051 * @returns Strict VBox status code.
15052 * @param pVCpu The cross context virtual CPU structure.
15053 * @param cbInstr The instruction length in bytes.
15054 * @param u16Port The port to read.
15055 * @param fImm Whether the port is specified using an immediate operand or
15056 *                      using the implicit DX register.
15057 * @param cbReg The register size.
15058 */
15059VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15060{
15061 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15062 Assert(cbReg <= 4 && cbReg != 3);
15063
15064 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15065 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15066 Assert(!pVCpu->iem.s.cActiveMappings);
15067 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15068}
15069
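/*
 * Caller sketch (illustrative): emulating a trapped "out dx, al" or
 * "in al, dx".  The port value would normally be taken from the guest DX
 * register or the exit information; the helper name is an assumption, only
 * the IEMExecDecodedOut()/IEMExecDecodedIn() signatures come from above.
 */
#if 0
static VBOXSTRICTRC sketchEmulatePortAccess(PVMCPU pVCpu, uint16_t u16Port, bool fWrite)
{
    if (fWrite)
        return IEMExecDecodedOut(pVCpu, 1 /*cbInstr: single-byte 0xEE encoding*/, u16Port,
                                 false /*fImm: the port comes from DX*/, 1 /*cbReg: AL*/);
    return IEMExecDecodedIn(pVCpu, 1 /*cbInstr: single-byte 0xEC encoding*/, u16Port,
                            false /*fImm: the port comes from DX*/, 1 /*cbReg: AL*/);
}
#endif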
15070
15071/**
15072 * Interface for HM and EM to write to a CRx register.
15073 *
15074 * @returns Strict VBox status code.
15075 * @param pVCpu The cross context virtual CPU structure.
15076 * @param cbInstr The instruction length in bytes.
15077 * @param iCrReg The control register number (destination).
15078 * @param iGReg The general purpose register number (source).
15079 *
15080 * @remarks In ring-0 not all of the state needs to be synced in.
15081 */
15082VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15083{
15084 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15085 Assert(iCrReg < 16);
15086 Assert(iGReg < 16);
15087
15088 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15089 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15090 Assert(!pVCpu->iem.s.cActiveMappings);
15091 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15092}
15093
15094
15095/**
15096 * Interface for HM and EM to read from a CRx register.
15097 *
15098 * @returns Strict VBox status code.
15099 * @param pVCpu The cross context virtual CPU structure.
15100 * @param cbInstr The instruction length in bytes.
15101 * @param iGReg The general purpose register number (destination).
15102 * @param iCrReg The control register number (source).
15103 *
15104 * @remarks In ring-0 not all of the state needs to be synced in.
15105 */
15106VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15107{
15108 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15109 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15110 | CPUMCTX_EXTRN_APIC_TPR);
15111 Assert(iCrReg < 16);
15112 Assert(iGReg < 16);
15113
15114 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15115 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15116 Assert(!pVCpu->iem.s.cActiveMappings);
15117 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15118}
15119
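/*
 * Caller sketch (illustrative): forwarding a trapped "mov cr3, rax" and the
 * corresponding read.  The 3-byte length matches the plain 0F 22 /r and
 * 0F 20 /r encodings; register indices follow the iCrReg/iGReg convention
 * asserted above.  The helper name is an assumption, only the two
 * IEMExecDecodedMovCRx* signatures come from this file.
 */
#if 0
static VBOXSTRICTRC sketchEmulateMovCr3(PVMCPU pVCpu, bool fWrite)
{
    if (fWrite)
        return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg: CR3*/, 0 /*iGReg: RAX*/);
    return IEMExecDecodedMovCRxRead(pVCpu, 3 /*cbInstr*/, 0 /*iGReg: RAX*/, 3 /*iCrReg: CR3*/);
}
#endif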
15120
15121/**
15122 * Interface for HM and EM to clear the CR0[TS] bit.
15123 *
15124 * @returns Strict VBox status code.
15125 * @param pVCpu The cross context virtual CPU structure.
15126 * @param cbInstr The instruction length in bytes.
15127 *
15128 * @remarks In ring-0 not all of the state needs to be synced in.
15129 */
15130VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15131{
15132 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15133
15134 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15135 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15136 Assert(!pVCpu->iem.s.cActiveMappings);
15137 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15138}
15139
15140
15141/**
15142 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15143 *
15144 * @returns Strict VBox status code.
15145 * @param pVCpu The cross context virtual CPU structure.
15146 * @param cbInstr The instruction length in bytes.
15147 * @param uValue The value to load into CR0.
15148 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15149 * memory operand. Otherwise pass NIL_RTGCPTR.
15150 *
15151 * @remarks In ring-0 not all of the state needs to be synced in.
15152 */
15153VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15154{
15155 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15156
15157 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15158 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15159 Assert(!pVCpu->iem.s.cActiveMappings);
15160 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15161}
15162
15163
15164/**
15165 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15166 *
15167 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15168 *
15169 * @returns Strict VBox status code.
15170 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15171 * @param cbInstr The instruction length in bytes.
15172 * @remarks In ring-0 not all of the state needs to be synced in.
15173 * @thread EMT(pVCpu)
15174 */
15175VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15176{
15177 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15178
15179 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15180 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15181 Assert(!pVCpu->iem.s.cActiveMappings);
15182 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15183}
15184
15185
15186/**
15187 * Interface for HM and EM to emulate the WBINVD instruction.
15188 *
15189 * @returns Strict VBox status code.
15190 * @param pVCpu The cross context virtual CPU structure.
15191 * @param cbInstr The instruction length in bytes.
15192 *
15193 * @remarks In ring-0 not all of the state needs to be synced in.
15194 */
15195VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15196{
15197 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15198
15199 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15200 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15201 Assert(!pVCpu->iem.s.cActiveMappings);
15202 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15203}
15204
15205
15206/**
15207 * Interface for HM and EM to emulate the INVD instruction.
15208 *
15209 * @returns Strict VBox status code.
15210 * @param pVCpu The cross context virtual CPU structure.
15211 * @param cbInstr The instruction length in bytes.
15212 *
15213 * @remarks In ring-0 not all of the state needs to be synced in.
15214 */
15215VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15216{
15217 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15218
15219 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15220 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15221 Assert(!pVCpu->iem.s.cActiveMappings);
15222 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15223}
15224
15225
15226/**
15227 * Interface for HM and EM to emulate the INVLPG instruction.
15228 *
15229 * @returns Strict VBox status code.
15230 * @retval VINF_PGM_SYNC_CR3
15231 *
15232 * @param pVCpu The cross context virtual CPU structure.
15233 * @param cbInstr The instruction length in bytes.
15234 * @param GCPtrPage The effective address of the page to invalidate.
15235 *
15236 * @remarks In ring-0 not all of the state needs to be synced in.
15237 */
15238VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15239{
15240 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15241
15242 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15243 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15244 Assert(!pVCpu->iem.s.cActiveMappings);
15245 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15246}
15247
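/*
 * Caller sketch (illustrative): forwarding a trapped INVLPG.  The linear
 * address would normally come from the decoded operand or exit information;
 * the 3-byte length matches a simple 0F 01 /7 encoding.  Note that
 * VINF_PGM_SYNC_CR3 may be returned and has to be handled by the caller.
 * The helper name is an assumption.
 */
#if 0
static VBOXSTRICTRC sketchEmulateInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    return IEMExecDecodedInvlpg(pVCpu, 3 /*cbInstr*/, GCPtrPage);
}
#endif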
15248
15249/**
15250 * Interface for HM and EM to emulate the CPUID instruction.
15251 *
15252 * @returns Strict VBox status code.
15253 *
15254 * @param pVCpu The cross context virtual CPU structure.
15255 * @param cbInstr The instruction length in bytes.
15256 *
15257 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
15258 */
15259VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15260{
15261 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15262 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15263
15264 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15265 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15266 Assert(!pVCpu->iem.s.cActiveMappings);
15267 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15268}
15269
15270
15271/**
15272 * Interface for HM and EM to emulate the RDPMC instruction.
15273 *
15274 * @returns Strict VBox status code.
15275 *
15276 * @param pVCpu The cross context virtual CPU structure.
15277 * @param cbInstr The instruction length in bytes.
15278 *
15279 * @remarks Not all of the state needs to be synced in.
15280 */
15281VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15282{
15283 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15284 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15285
15286 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15287 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15288 Assert(!pVCpu->iem.s.cActiveMappings);
15289 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15290}
15291
15292
15293/**
15294 * Interface for HM and EM to emulate the RDTSC instruction.
15295 *
15296 * @returns Strict VBox status code.
15297 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15298 *
15299 * @param pVCpu The cross context virtual CPU structure.
15300 * @param cbInstr The instruction length in bytes.
15301 *
15302 * @remarks Not all of the state needs to be synced in.
15303 */
15304VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15305{
15306 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15307 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15308
15309 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15310 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15311 Assert(!pVCpu->iem.s.cActiveMappings);
15312 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15313}
15314
15315
15316/**
15317 * Interface for HM and EM to emulate the RDTSCP instruction.
15318 *
15319 * @returns Strict VBox status code.
15320 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15321 *
15322 * @param pVCpu The cross context virtual CPU structure.
15323 * @param cbInstr The instruction length in bytes.
15324 *
15325 * @remarks Not all of the state needs to be synced in.  Recommended
15326 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15327 */
15328VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15329{
15330 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15331 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15332
15333 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15334 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15335 Assert(!pVCpu->iem.s.cActiveMappings);
15336 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15337}
15338
15339
15340/**
15341 * Interface for HM and EM to emulate the RDMSR instruction.
15342 *
15343 * @returns Strict VBox status code.
15344 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15345 *
15346 * @param pVCpu The cross context virtual CPU structure.
15347 * @param cbInstr The instruction length in bytes.
15348 *
15349 * @remarks Not all of the state needs to be synced in. Requires RCX and
15350 * (currently) all MSRs.
15351 */
15352VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15353{
15354 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15355 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15356
15357 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15358 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15359 Assert(!pVCpu->iem.s.cActiveMappings);
15360 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15361}
15362
15363
15364/**
15365 * Interface for HM and EM to emulate the WRMSR instruction.
15366 *
15367 * @returns Strict VBox status code.
15368 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15369 *
15370 * @param pVCpu The cross context virtual CPU structure.
15371 * @param cbInstr The instruction length in bytes.
15372 *
15373 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15374 * and (currently) all MSRs.
15375 */
15376VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15377{
15378 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15379 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15380 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15381
15382 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15383 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15384 Assert(!pVCpu->iem.s.cActiveMappings);
15385 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15386}
15387
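/*
 * Caller sketch (illustrative): a VM-exit handler forwarding RDMSR/WRMSR to
 * IEM.  Both instructions are two bytes (0F 32 / 0F 30); ECX, EAX and EDX are
 * taken from the guest context, which must be synced in as noted in the
 * remarks above.  The helper name is an assumption.
 */
#if 0
static VBOXSTRICTRC sketchEmulateMsrAccess(PVMCPU pVCpu, bool fWrite)
{
    return fWrite
         ? IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr*/)
         : IEMExecDecodedRdmsr(pVCpu, 2 /*cbInstr*/);
}
#endif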
15388
15389/**
15390 * Interface for HM and EM to emulate the MONITOR instruction.
15391 *
15392 * @returns Strict VBox status code.
15393 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15394 *
15395 * @param pVCpu The cross context virtual CPU structure.
15396 * @param cbInstr The instruction length in bytes.
15397 *
15398 * @remarks Not all of the state needs to be synced in.
15399 * @remarks ASSUMES the default segment is DS and that no segment override
15400 *          prefixes are used.
15401 */
15402VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15403{
15404 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15405 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15406
15407 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15408 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15409 Assert(!pVCpu->iem.s.cActiveMappings);
15410 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15411}
15412
15413
15414/**
15415 * Interface for HM and EM to emulate the MWAIT instruction.
15416 *
15417 * @returns Strict VBox status code.
15418 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15419 *
15420 * @param pVCpu The cross context virtual CPU structure.
15421 * @param cbInstr The instruction length in bytes.
15422 *
15423 * @remarks Not all of the state needs to be synced in.
15424 */
15425VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15426{
15427 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15428
15429 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15430 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15431 Assert(!pVCpu->iem.s.cActiveMappings);
15432 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15433}
15434
15435
15436/**
15437 * Interface for HM and EM to emulate the HLT instruction.
15438 *
15439 * @returns Strict VBox status code.
15440 * @retval  VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if an exception is raised.
15441 *
15442 * @param pVCpu The cross context virtual CPU structure.
15443 * @param cbInstr The instruction length in bytes.
15444 *
15445 * @remarks Not all of the state needs to be synced in.
15446 */
15447VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15448{
15449 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15450
15451 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15452 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15453 Assert(!pVCpu->iem.s.cActiveMappings);
15454 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15455}
15456
15457
15458/**
15459 * Checks if IEM is in the process of delivering an event (interrupt or
15460 * exception).
15461 *
15462 * @returns true if we're in the process of raising an interrupt or exception,
15463 * false otherwise.
15464 * @param pVCpu The cross context virtual CPU structure.
15465 * @param puVector Where to store the vector associated with the
15466 * currently delivered event, optional.
15467 * @param   pfFlags     Where to store the event delivery flags (see
15468 * IEM_XCPT_FLAGS_XXX), optional.
15469 * @param puErr Where to store the error code associated with the
15470 * event, optional.
15471 * @param puCr2 Where to store the CR2 associated with the event,
15472 * optional.
15473 * @remarks The caller should check the flags to determine if the error code and
15474 * CR2 are valid for the event.
15475 */
15476VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15477{
15478 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15479 if (fRaisingXcpt)
15480 {
15481 if (puVector)
15482 *puVector = pVCpu->iem.s.uCurXcpt;
15483 if (pfFlags)
15484 *pfFlags = pVCpu->iem.s.fCurXcpt;
15485 if (puErr)
15486 *puErr = pVCpu->iem.s.uCurXcptErr;
15487 if (puCr2)
15488 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15489 }
15490 return fRaisingXcpt;
15491}
15492
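/*
 * Usage sketch (illustrative): logging whatever event IEM is currently
 * delivering, e.g. when a nested fault needs to be analysed.  The
 * IEM_XCPT_FLAGS_ERR/CR2 checks mirror the flag handling in IEMInjectTrap()
 * above; the helper name is an assumption.
 */
#if 0
static void sketchLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x fFlags=%#x uErr=%#x (valid=%RTbool) uCr2=%#RX64 (valid=%RTbool)\n",
             uVector, fFlags, uErr, RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR),
             uCr2, RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2)));
}
#endif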
15493#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15494
15495/**
15496 * Interface for HM and EM to emulate the CLGI instruction.
15497 *
15498 * @returns Strict VBox status code.
15499 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15500 * @param cbInstr The instruction length in bytes.
15501 * @thread EMT(pVCpu)
15502 */
15503VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15504{
15505 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15506
15507 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15508 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15509 Assert(!pVCpu->iem.s.cActiveMappings);
15510 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15511}
15512
15513
15514/**
15515 * Interface for HM and EM to emulate the STGI instruction.
15516 *
15517 * @returns Strict VBox status code.
15518 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15519 * @param cbInstr The instruction length in bytes.
15520 * @thread EMT(pVCpu)
15521 */
15522VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15523{
15524 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15525
15526 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15527 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15528 Assert(!pVCpu->iem.s.cActiveMappings);
15529 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15530}
15531
15532
15533/**
15534 * Interface for HM and EM to emulate the VMLOAD instruction.
15535 *
15536 * @returns Strict VBox status code.
15537 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15538 * @param cbInstr The instruction length in bytes.
15539 * @thread EMT(pVCpu)
15540 */
15541VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15542{
15543 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15544
15545 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15546 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15547 Assert(!pVCpu->iem.s.cActiveMappings);
15548 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15549}
15550
15551
15552/**
15553 * Interface for HM and EM to emulate the VMSAVE instruction.
15554 *
15555 * @returns Strict VBox status code.
15556 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15557 * @param cbInstr The instruction length in bytes.
15558 * @thread EMT(pVCpu)
15559 */
15560VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15561{
15562 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15563
15564 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15565 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15566 Assert(!pVCpu->iem.s.cActiveMappings);
15567 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15568}
15569
15570
15571/**
15572 * Interface for HM and EM to emulate the INVLPGA instruction.
15573 *
15574 * @returns Strict VBox status code.
15575 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15576 * @param cbInstr The instruction length in bytes.
15577 * @thread EMT(pVCpu)
15578 */
15579VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15580{
15581 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15582
15583 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15584 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15585 Assert(!pVCpu->iem.s.cActiveMappings);
15586 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15587}
15588
15589
15590/**
15591 * Interface for HM and EM to emulate the VMRUN instruction.
15592 *
15593 * @returns Strict VBox status code.
15594 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15595 * @param cbInstr The instruction length in bytes.
15596 * @thread EMT(pVCpu)
15597 */
15598VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15599{
15600 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15601 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15602
15603 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15604 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15605 Assert(!pVCpu->iem.s.cActiveMappings);
15606 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15607}
15608
15609
15610/**
15611 * Interface for HM and EM to emulate \#VMEXIT.
15612 *
15613 * @returns Strict VBox status code.
15614 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15615 * @param uExitCode The exit code.
15616 * @param uExitInfo1 The exit info. 1 field.
15617 * @param uExitInfo2 The exit info. 2 field.
15618 * @thread EMT(pVCpu)
15619 */
15620VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15621{
15622 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15623 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15624 if (pVCpu->iem.s.cActiveMappings)
15625 iemMemRollback(pVCpu);
15626 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15627}
15628
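/*
 * Caller sketch (illustrative): signalling a CPUID intercept \#VMEXIT to the
 * nested-guest.  SVM_EXIT_CPUID is the AMD-V exit code for this intercept and
 * the two exit info fields are not defined for it, so zero is passed.  The
 * helper name is an assumption; only the IEMExecSvmVmexit() signature comes
 * from this file.
 */
#if 0
static VBOXSTRICTRC sketchNestedCpuidIntercept(PVMCPU pVCpu)
{
    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_CPUID, 0 /*uExitInfo1*/, 0 /*uExitInfo2*/);
}
#endif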
15629#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15630
15631#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15632
15633/**
15634 * Interface for HM and EM to emulate the VMREAD instruction.
15635 *
15636 * @returns Strict VBox status code.
15637 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15638 * @param pExitInfo Pointer to the VM-exit information struct.
15639 * @thread EMT(pVCpu)
15640 */
15641VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15642{
15643 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15644 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15645 Assert(pExitInfo);
15646
15647 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15648
15649 VBOXSTRICTRC rcStrict;
15650 uint8_t const cbInstr = pExitInfo->cbInstr;
15651 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15652 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15653 {
15654 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15655 {
15656 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15657 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15658 }
15659 else
15660 {
15661 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15662 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15663 }
15664 }
15665 else
15666 {
15667 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15668 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15669 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15670 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15671 }
15672 if (pVCpu->iem.s.cActiveMappings)
15673 iemMemRollback(pVCpu);
15674 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15675}
15676
15677
15678/**
15679 * Interface for HM and EM to emulate the VMWRITE instruction.
15680 *
15681 * @returns Strict VBox status code.
15682 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15683 * @param pExitInfo Pointer to the VM-exit information struct.
15684 * @thread EMT(pVCpu)
15685 */
15686VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15687{
15688 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15689 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15690 Assert(pExitInfo);
15691
15692 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15693
15694 uint64_t u64Val;
15695 uint8_t iEffSeg;
15696 IEMMODE enmEffAddrMode;
15697 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15698 {
15699 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15700 iEffSeg = UINT8_MAX;
15701 enmEffAddrMode = UINT8_MAX;
15702 }
15703 else
15704 {
15705 u64Val = pExitInfo->GCPtrEffAddr;
15706 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15707 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15708 }
15709 uint8_t const cbInstr = pExitInfo->cbInstr;
15710 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15711 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
15712 if (pVCpu->iem.s.cActiveMappings)
15713 iemMemRollback(pVCpu);
15714 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15715}
15716
15717
15718/**
15719 * Interface for HM and EM to emulate the VMPTRLD instruction.
15720 *
15721 * @returns Strict VBox status code.
15722 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15723 * @param pExitInfo Pointer to the VM-exit information struct.
15724 * @thread EMT(pVCpu)
15725 */
15726VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15727{
15728 Assert(pExitInfo);
15729 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15730 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15731
15732 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15733
15734 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15735 uint8_t const cbInstr = pExitInfo->cbInstr;
15736 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15737 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15738 if (pVCpu->iem.s.cActiveMappings)
15739 iemMemRollback(pVCpu);
15740 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15741}
15742
15743
15744/**
15745 * Interface for HM and EM to emulate the VMPTRST instruction.
15746 *
15747 * @returns Strict VBox status code.
15748 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15749 * @param pExitInfo Pointer to the VM-exit information struct.
15750 * @thread EMT(pVCpu)
15751 */
15752VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15753{
15754 Assert(pExitInfo);
15755 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15756 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15757
15758 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15759
15760 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15761 uint8_t const cbInstr = pExitInfo->cbInstr;
15762 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15763 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15764 if (pVCpu->iem.s.cActiveMappings)
15765 iemMemRollback(pVCpu);
15766 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15767}
15768
15769
15770/**
15771 * Interface for HM and EM to emulate the VMCLEAR instruction.
15772 *
15773 * @returns Strict VBox status code.
15774 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15775 * @param pExitInfo Pointer to the VM-exit information struct.
15776 * @thread EMT(pVCpu)
15777 */
15778VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15779{
15780 Assert(pExitInfo);
15781 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15782 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15783
15784 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15785
15786 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15787 uint8_t const cbInstr = pExitInfo->cbInstr;
15788 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15789 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15790 if (pVCpu->iem.s.cActiveMappings)
15791 iemMemRollback(pVCpu);
15792 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15793}
15794
15795
15796/**
15797 * Interface for HM and EM to emulate the VMXON instruction.
15798 *
15799 * @returns Strict VBox status code.
15800 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15801 * @param pExitInfo Pointer to the VM-exit information struct.
15802 * @thread EMT(pVCpu)
15803 */
15804VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15805{
15806 Assert(pExitInfo);
15807 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15808 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HWVIRT);
15809
15810 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15811
15812 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15813 uint8_t const cbInstr = pExitInfo->cbInstr;
15814 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
15815 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
15816 if (pVCpu->iem.s.cActiveMappings)
15817 iemMemRollback(pVCpu);
15818 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15819}
15820
15821
15822/**
15823 * Interface for HM and EM to emulate the VMXOFF instruction.
15824 *
15825 * @returns Strict VBox status code.
15826 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15827 * @param cbInstr The instruction length in bytes.
15828 * @thread EMT(pVCpu)
15829 */
15830VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
15831{
15832 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15833
15834 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15835 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
15836 Assert(!pVCpu->iem.s.cActiveMappings);
15837 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15838}
15839
15840#endif
15841
15842#ifdef IN_RING3
15843
15844/**
15845 * Handles the unlikely and probably fatal merge cases.
15846 *
15847 * @returns Merged status code.
15848 * @param rcStrict Current EM status code.
15849 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15850 * with @a rcStrict.
15851 * @param iMemMap The memory mapping index. For error reporting only.
15852 * @param pVCpu The cross context virtual CPU structure of the calling
15853 * thread, for error reporting only.
15854 */
15855DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
15856 unsigned iMemMap, PVMCPU pVCpu)
15857{
15858 if (RT_FAILURE_NP(rcStrict))
15859 return rcStrict;
15860
15861 if (RT_FAILURE_NP(rcStrictCommit))
15862 return rcStrictCommit;
15863
15864 if (rcStrict == rcStrictCommit)
15865 return rcStrictCommit;
15866
15867 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
15868 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
15869 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
15870 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
15871 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
15872 return VERR_IOM_FF_STATUS_IPE;
15873}
15874
15875
15876/**
15877 * Helper for IOMR3ProcessForceFlag.
15878 *
15879 * @returns Merged status code.
15880 * @param rcStrict Current EM status code.
15881 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
15882 * with @a rcStrict.
15883 * @param iMemMap The memory mapping index. For error reporting only.
15884 * @param pVCpu The cross context virtual CPU structure of the calling
15885 * thread, for error reporting only.
15886 */
15887DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
15888{
15889 /* Simple. */
15890 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
15891 return rcStrictCommit;
15892
15893 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
15894 return rcStrict;
15895
15896 /* EM scheduling status codes. */
15897 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
15898 && rcStrict <= VINF_EM_LAST))
15899 {
15900 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
15901 && rcStrictCommit <= VINF_EM_LAST))
15902 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
15903 }
15904
15905 /* Unlikely */
15906 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
15907}
15908
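/*
 * In short, the merge policy of the two helpers above: hard failures and the
 * trivial VINF_SUCCESS / VINF_EM_RAW_TO_R3 cases pass the other status
 * straight through, two EM scheduling codes are merged by taking the
 * numerically smaller (i.e. higher priority) one, and conflicting
 * informational codes are treated as a bug and yield VERR_IOM_FF_STATUS_IPE
 * from iemR3MergeStatusSlow().
 */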
15909
15910/**
15911 * Called by force-flag handling code when VMCPU_FF_IEM is set.
15912 *
15913 * @returns Merge between @a rcStrict and what the commit operation returned.
15914 * @param pVM The cross context VM structure.
15915 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15916 * @param rcStrict The status code returned by ring-0 or raw-mode.
15917 */
15918VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
15919{
15920 /*
15921 * Reset the pending commit.
15922 */
15923 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
15924 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
15925 ("%#x %#x %#x\n",
15926 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15927 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
15928
15929 /*
15930 * Commit the pending bounce buffers (usually just one).
15931 */
15932 unsigned cBufs = 0;
15933 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
15934 while (iMemMap-- > 0)
15935 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
15936 {
15937 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
15938 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
15939 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
15940
15941 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
15942 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
15943 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
15944
15945 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
15946 {
15947 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
15948 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
15949 pbBuf,
15950 cbFirst,
15951 PGMACCESSORIGIN_IEM);
15952 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
15953 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
15954 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
15955 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
15956 }
15957
15958 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
15959 {
15960 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
15961 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
15962 pbBuf + cbFirst,
15963 cbSecond,
15964 PGMACCESSORIGIN_IEM);
15965 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
15966 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
15967 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
15968 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
15969 }
15970 cBufs++;
15971 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
15972 }
15973
15974 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
15975 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
15976 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
15977 pVCpu->iem.s.cActiveMappings = 0;
15978 return rcStrict;
15979}
15980
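/*
 * Caller sketch (illustrative): how the ring-3 run loop might react to
 * VMCPU_FF_IEM after returning from ring-0 or raw-mode.  The force-flag check
 * mirrors the one this function clears; the wrapper name and surrounding loop
 * structure are assumptions.
 */
#if 0
static VBOXSTRICTRC emR3SketchHandleIemForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict); /* Commits pending bounce-buffer writes. */
    return rcStrict;
}
#endif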
15981#endif /* IN_RING3 */
15982