VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@75323

Last change on this file since 75323 was 75320, checked in by vboxsync, 6 years ago

VMM/IEM: Nested VMX: bugref:9180 Added APIC memory access VM-exits. Might be more places to cover.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 638.1 KB
1/* $Id: IEMAll.cpp 75320 2018-11-08 12:16:27Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
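/*
 * Illustrative sketch only (not part of the original source): with LOG_GROUP
 * set to LOG_GROUP_IEM (see the header section below), the levels above map
 * onto the generic VBox logging macros roughly as follows.  The message texts
 * and variables are made up for illustration.
 *
 * @code
 *      LogFlow(("IEMExecOne: cs:rip=%04x:%RX64\n", uCs, uRip));    // Flow: enter/exit info.
 *      Log(("iemRaiseXcptOrInt: vec=%#x\n", u8Vector));            // Level 1: exceptions and such.
 *      Log4(("decode - %04x:%RX64 mov eax, ebx\n", uCs, uRip));    // Level 4: decoded mnemonics w/ EIP.
 *      Log8(("IEM WR %RGp LB %#zx\n", GCPhysMem, cbMem));          // Level 8: memory writes.
 * @endcode
 */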
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#include <VBox/vmm/tm.h>
105#include <VBox/vmm/dbgf.h>
106#include <VBox/vmm/dbgftrace.h>
107#ifdef VBOX_WITH_RAW_MODE_NOT_R0
108# include <VBox/vmm/patm.h>
109# if defined(VBOX_WITH_CALL_RECORD) || defined(REM_MONITOR_CODE_PAGES)
110# include <VBox/vmm/csam.h>
111# endif
112#endif
113#include "IEMInternal.h"
114#include <VBox/vmm/vm.h>
115#include <VBox/log.h>
116#include <VBox/err.h>
117#include <VBox/param.h>
118#include <VBox/dis.h>
119#include <VBox/disopcode.h>
120#include <iprt/asm-math.h>
121#include <iprt/assert.h>
122#include <iprt/string.h>
123#include <iprt/x86.h>
124
125
126/*********************************************************************************************************************************
127* Structures and Typedefs *
128*********************************************************************************************************************************/
129/** @typedef PFNIEMOP
130 * Pointer to an opcode decoder function.
131 */
132
133/** @def FNIEMOP_DEF
134 * Define an opcode decoder function.
135 *
136 * We're using macros for this so that adding and removing parameters as well as
137 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
138 *
139 * @param a_Name The function name.
140 */
141
142/** @typedef PFNIEMOPRM
143 * Pointer to an opcode decoder function with RM byte.
144 */
145
146/** @def FNIEMOPRM_DEF
147 * Define an opcode decoder function with RM byte.
148 *
149 * We're using macros for this so that adding and removing parameters as well as
150 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
151 *
152 * @param a_Name The function name.
153 */
154
155#if defined(__GNUC__) && defined(RT_ARCH_X86)
156typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPU pVCpu);
157typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
158# define FNIEMOP_DEF(a_Name) \
159 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu)
160# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
161 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
162# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
163 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
164
165#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
166typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPU pVCpu);
167typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
168# define FNIEMOP_DEF(a_Name) \
169 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
170# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
171 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
172# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
173 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
174
175#elif defined(__GNUC__)
176typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
177typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
178# define FNIEMOP_DEF(a_Name) \
179 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu)
180# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
181 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0)
182# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
183 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
184
185#else
186typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
187typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
188# define FNIEMOP_DEF(a_Name) \
189 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
190# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
191 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
192# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
193 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
194
195#endif
196#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
197
198
199/**
200 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
201 */
202typedef union IEMSELDESC
203{
204 /** The legacy view. */
205 X86DESC Legacy;
206 /** The long mode view. */
207 X86DESC64 Long;
208} IEMSELDESC;
209/** Pointer to a selector descriptor table entry. */
210typedef IEMSELDESC *PIEMSELDESC;
211
212/**
213 * CPU exception classes.
214 */
215typedef enum IEMXCPTCLASS
216{
217 IEMXCPTCLASS_BENIGN,
218 IEMXCPTCLASS_CONTRIBUTORY,
219 IEMXCPTCLASS_PAGE_FAULT,
220 IEMXCPTCLASS_DOUBLE_FAULT
221} IEMXCPTCLASS;
222
223
224/*********************************************************************************************************************************
225* Defined Constants And Macros *
226*********************************************************************************************************************************/
227/** @def IEM_WITH_SETJMP
228 * Enables alternative status code handling using setjmps.
229 *
230 * This adds a bit of expense via the setjmp() call since it saves all the
231 * non-volatile registers. However, it eliminates return code checks and allows
232 * for more optimal return value passing (return regs instead of stack buffer).
233 */
234#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
235# define IEM_WITH_SETJMP
236#endif
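/*
 * Sketch of the two error-propagation styles this selects between; the *Jmp
 * fetch helper shown is assumed rather than quoted from this file, so treat
 * the example as illustrative only.
 *
 * @code
 *      // Status-code style: every caller checks and propagates the status.
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, iSegReg, GCPtrMem);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 *      // Setjmp style: the helper returns the value directly and longjmps back
 *      // to the top-level setjmp() frame if the access fails.
 *      uint32_t u32Value = iemMemFetchDataU32Jmp(pVCpu, iSegReg, GCPtrMem);
 * @endcode
 */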
237
238/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
239 * due to GCC lacking knowledge about the value range of a switch. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
241
242/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
243#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
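/*
 * Typical usage sketch: a switch over an enum where all valid cases return,
 * so the default case is unreachable.  The worker functions named below are
 * hypothetical.
 *
 * @code
 *      switch (pVCpu->iem.s.enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: return iemDoExample16(pVCpu);
 *          case IEMMODE_32BIT: return iemDoExample32(pVCpu);
 *          case IEMMODE_64BIT: return iemDoExample64(pVCpu);
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 * @endcode
 */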
244
245/**
246 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
247 * occasion.
248 */
249#ifdef LOG_ENABLED
250# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
251 do { \
252 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
253 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
254 } while (0)
255#else
256# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
257 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
258#endif
259
260/**
261 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
262 * occasion using the supplied logger statement.
263 *
264 * @param a_LoggerArgs What to log on failure.
265 */
266#ifdef LOG_ENABLED
267# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
268 do { \
269 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
270 /*LogFunc(a_LoggerArgs);*/ \
271 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
272 } while (0)
273#else
274# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
275 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
276#endif
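/*
 * Usage sketch: instruction implementations bail out like this when they hit
 * a corner case IEM does not handle yet.  The condition and format arguments
 * are made up for illustration.
 *
 * @code
 *      if (fSomeUnhandledCornerCase)
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("uValue=%#x not handled\n", uValue));
 * @endcode
 */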
277
278/**
279 * Call an opcode decoder function.
280 *
281 * We're using macros for this so that adding and removing parameters can be
282 * done as we please. See FNIEMOP_DEF.
283 */
284#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
285
286/**
287 * Call a common opcode decoder function taking one extra argument.
288 *
289 * We're using macros for this so that adding and removing parameters can be
290 * done as we please. See FNIEMOP_DEF_1.
291 */
292#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
293
294/**
295 * Call a common opcode decoder function taking two extra arguments.
296 *
297 * We're using macros for this so that adding and removing parameters can be
298 * done as we please. See FNIEMOP_DEF_2.
299 */
300#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
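/*
 * Usage sketch: a decoder defined with FNIEMOP_DEF_1 is invoked through the
 * matching FNIEMOP_CALL_1, which supplies pVCpu implicitly.  The handler name
 * below is hypothetical.
 *
 * @code
 *      FNIEMOP_DEF_1(iemOp_ExampleGroup, uint8_t, bRm)
 *      {
 *          RT_NOREF(pVCpu, bRm);
 *          return VINF_SUCCESS;
 *      }
 *
 *      // ... and from another decoder function:
 *      return FNIEMOP_CALL_1(iemOp_ExampleGroup, bRm);
 * @endcode
 */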
301
302/**
303 * Check if we're currently executing in real or virtual 8086 mode.
304 *
305 * @returns @c true if it is, @c false if not.
306 * @param a_pVCpu The IEM state of the current CPU.
307 */
308#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
309
310/**
311 * Check if we're currently executing in virtual 8086 mode.
312 *
313 * @returns @c true if it is, @c false if not.
314 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
315 */
316#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
317
318/**
319 * Check if we're currently executing in long mode.
320 *
321 * @returns @c true if it is, @c false if not.
322 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
323 */
324#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
325
326/**
327 * Check if we're currently executing in a 64-bit code segment.
328 *
329 * @returns @c true if it is, @c false if not.
330 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
331 */
332#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
333
334/**
335 * Check if we're currently executing in real mode.
336 *
337 * @returns @c true if it is, @c false if not.
338 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
339 */
340#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
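/*
 * Usage sketch: instruction implementations typically use these predicates as
 * guards, e.g. to refuse an instruction outside protected mode (the choice of
 * fault below is illustrative, not taken from a specific instruction).
 *
 * @code
 *      if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
 *          return iemRaiseGeneralProtectionFault0(pVCpu);
 * @endcode
 */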
341
342/**
343 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
344 * @returns PCCPUMFEATURES
345 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
346 */
347#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
348
349/**
350 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
351 * @returns PCCPUMFEATURES
352 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
353 */
354#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
355
356/**
357 * Evaluates to true if we're presenting an Intel CPU to the guest.
358 */
359#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
360
361/**
362 * Evaluates to true if we're presenting an AMD CPU to the guest.
363 */
364#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD )
365
366/**
367 * Check if the address is canonical.
368 */
369#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
370
371/**
372 * Gets the effective VEX.VVVV value.
373 *
374 * The 4th bit is ignored if not 64-bit code.
375 * @returns effective V-register value.
376 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
377 */
378#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
379 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
380
381/** @def IEM_USE_UNALIGNED_DATA_ACCESS
382 * Use unaligned accesses instead of elaborate byte assembly. */
383#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
384# define IEM_USE_UNALIGNED_DATA_ACCESS
385#endif
386
387#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
388
389/**
390 * Check if the guest has entered VMX root operation.
391 */
392# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
393
394/**
395 * Check if the guest has entered VMX non-root operation.
396 */
397# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
398
399/**
400 * Check if the nested-guest has the given Pin-based VM-execution control set.
401 */
402# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
403 (CPUMIsGuestVmxPinCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
404
405/**
406 * Check if the nested-guest has the given Processor-based VM-execution control set.
407 */
408# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
409 (CPUMIsGuestVmxProcCtlsSet((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
410
411/**
412 * Check if the nested-guest has the given Secondary Processor-based VM-execution
413 * control set.
414 */
415# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
416 (CPUMIsGuestVmxProcCtls2Set((a_pVCpu), IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
417
418/**
419 * Invokes the VMX VM-exit handler for an instruction intercept.
420 */
421# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
422 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
423
424/**
425 * Invokes the VMX VM-exit handler for an instruction intercept where the
426 * instruction provides additional VM-exit information.
427 */
428# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
429 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
430
431/**
432 * Invokes the VMX VM-exit handler for a task switch.
433 */
434# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
435 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
436
437/**
438 * Invokes the VMX VM-exit handler for MWAIT.
439 */
440# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
441 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
442
443/**
444 * Invokes the VMX VM-exit handler for triple faults.
445 */
446# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) \
447 do { return iemVmxVmexitTripleFault(a_pVCpu); } while (0)
448
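/**
 * Invokes the VMX VM-exit handler for APIC accesses.
 */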
449# define IEM_VMX_VMEXIT_APIC_ACCESS_RET(a_pVCpu, a_offAccess, a_fAccess) \
450 do { return iemVmxVmexitApicAccess((a_pVCpu), (a_offAccess), (a_fAccess)); } while (0)
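/*
 * Usage sketch: an instruction intercept typically pairs the control query
 * with the VM-exit macro as below.  The control and exit-reason constants are
 * plausible VBox names used for illustration only.
 *
 * @code
 *      if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
 *          && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_HLT_EXIT))
 *          IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_HLT, cbInstr);
 * @endcode
 */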
451
452#else
453# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
454# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
455# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (false)
456# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (false)
457# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (false)
458# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
460# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
461# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
462# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu) do { return VERR_VMX_IPE_1; } while (0)
463# define IEM_VMX_VMEXIT_APIC_ACCESS_RET(a_pVCpu, a_offAccess, a_fAccess) do { return VERR_VMX_IPE_1; } while (0)
464
465#endif
466
467#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
468/**
469 * Check if an SVM control/instruction intercept is set.
470 */
471# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
472 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
473
474/**
475 * Check if an SVM read CRx intercept is set.
476 */
477# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
478 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
479
480/**
481 * Check if an SVM write CRx intercept is set.
482 */
483# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
484 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
485
486/**
487 * Check if an SVM read DRx intercept is set.
488 */
489# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
490 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
491
492/**
493 * Check if an SVM write DRx intercept is set.
494 */
495# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
496 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
497
498/**
499 * Check if an SVM exception intercept is set.
500 */
501# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
502 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
503
504/**
505 * Invokes the SVM \#VMEXIT handler for the nested-guest.
506 */
507# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
508 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
509
510/**
511 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
512 * corresponding decode assist information.
513 */
514# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
515 do \
516 { \
517 uint64_t uExitInfo1; \
518 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
519 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
520 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
521 else \
522 uExitInfo1 = 0; \
523 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
524 } while (0)
525
526/** Checks and handles the SVM nested-guest instruction intercept and updates
527 * the NRIP if needed.
528 */
529# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
530 do \
531 { \
532 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
533 { \
534 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
535 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
536 } \
537 } while (0)
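/*
 * Usage sketch: an instruction implementation guards itself with the intercept
 * check like this.  The RDTSC intercept/exit-code constants are plausible VBox
 * names used for illustration only.
 *
 * @code
 *      IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_RDTSC, SVM_EXIT_RDTSC, 0, 0);
 * @endcode
 */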
538
539/** Checks and handles SVM nested-guest CR0 read intercept. */
540# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
541 do \
542 { \
543 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
544 { /* probably likely */ } \
545 else \
546 { \
547 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
548 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
549 } \
550 } while (0)
551
552/**
553 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
554 */
555# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
556 do { \
557 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
558 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
559 } while (0)
560
561#else
562# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
563# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
564# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
565# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
566# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
567# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
568# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
569# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
570# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
571# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
572# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
573
574#endif
575
576
577/*********************************************************************************************************************************
578* Global Variables *
579*********************************************************************************************************************************/
580extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
581
582
583/** Function table for the ADD instruction. */
584IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
585{
586 iemAImpl_add_u8, iemAImpl_add_u8_locked,
587 iemAImpl_add_u16, iemAImpl_add_u16_locked,
588 iemAImpl_add_u32, iemAImpl_add_u32_locked,
589 iemAImpl_add_u64, iemAImpl_add_u64_locked
590};
591
592/** Function table for the ADC instruction. */
593IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
594{
595 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
596 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
597 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
598 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
599};
600
601/** Function table for the SUB instruction. */
602IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
603{
604 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
605 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
606 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
607 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
608};
609
610/** Function table for the SBB instruction. */
611IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
612{
613 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
614 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
615 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
616 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
617};
618
619/** Function table for the OR instruction. */
620IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
621{
622 iemAImpl_or_u8, iemAImpl_or_u8_locked,
623 iemAImpl_or_u16, iemAImpl_or_u16_locked,
624 iemAImpl_or_u32, iemAImpl_or_u32_locked,
625 iemAImpl_or_u64, iemAImpl_or_u64_locked
626};
627
628/** Function table for the XOR instruction. */
629IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
630{
631 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
632 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
633 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
634 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
635};
636
637/** Function table for the AND instruction. */
638IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
639{
640 iemAImpl_and_u8, iemAImpl_and_u8_locked,
641 iemAImpl_and_u16, iemAImpl_and_u16_locked,
642 iemAImpl_and_u32, iemAImpl_and_u32_locked,
643 iemAImpl_and_u64, iemAImpl_and_u64_locked
644};
645
646/** Function table for the CMP instruction.
647 * @remarks Making operand order ASSUMPTIONS.
648 */
649IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
650{
651 iemAImpl_cmp_u8, NULL,
652 iemAImpl_cmp_u16, NULL,
653 iemAImpl_cmp_u32, NULL,
654 iemAImpl_cmp_u64, NULL
655};
656
657/** Function table for the TEST instruction.
658 * @remarks Making operand order ASSUMPTIONS.
659 */
660IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
661{
662 iemAImpl_test_u8, NULL,
663 iemAImpl_test_u16, NULL,
664 iemAImpl_test_u32, NULL,
665 iemAImpl_test_u64, NULL
666};
667
668/** Function table for the BT instruction. */
669IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
670{
671 NULL, NULL,
672 iemAImpl_bt_u16, NULL,
673 iemAImpl_bt_u32, NULL,
674 iemAImpl_bt_u64, NULL
675};
676
677/** Function table for the BTC instruction. */
678IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
679{
680 NULL, NULL,
681 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
682 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
683 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
684};
685
686/** Function table for the BTR instruction. */
687IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
688{
689 NULL, NULL,
690 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
691 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
692 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
693};
694
695/** Function table for the BTS instruction. */
696IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
697{
698 NULL, NULL,
699 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
700 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
701 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
702};
703
704/** Function table for the BSF instruction. */
705IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
706{
707 NULL, NULL,
708 iemAImpl_bsf_u16, NULL,
709 iemAImpl_bsf_u32, NULL,
710 iemAImpl_bsf_u64, NULL
711};
712
713/** Function table for the BSR instruction. */
714IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
715{
716 NULL, NULL,
717 iemAImpl_bsr_u16, NULL,
718 iemAImpl_bsr_u32, NULL,
719 iemAImpl_bsr_u64, NULL
720};
721
722/** Function table for the IMUL instruction. */
723IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
724{
725 NULL, NULL,
726 iemAImpl_imul_two_u16, NULL,
727 iemAImpl_imul_two_u32, NULL,
728 iemAImpl_imul_two_u64, NULL
729};
730
731/** Group 1 /r lookup table. */
732IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
733{
734 &g_iemAImpl_add,
735 &g_iemAImpl_or,
736 &g_iemAImpl_adc,
737 &g_iemAImpl_sbb,
738 &g_iemAImpl_and,
739 &g_iemAImpl_sub,
740 &g_iemAImpl_xor,
741 &g_iemAImpl_cmp
742};
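/*
 * Usage sketch: group 1 opcodes (0x80..0x83) encode the operation in the
 * ModR/M reg field (bits 3-5), so a decoder picks the implementation table
 * like this (the variable names are hypothetical).
 *
 * @code
 *      PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
 * @endcode
 */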
743
744/** Function table for the INC instruction. */
745IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
746{
747 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
748 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
749 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
750 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
751};
752
753/** Function table for the DEC instruction. */
754IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
755{
756 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
757 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
758 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
759 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
760};
761
762/** Function table for the NEG instruction. */
763IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
764{
765 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
766 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
767 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
768 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
769};
770
771/** Function table for the NOT instruction. */
772IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
773{
774 iemAImpl_not_u8, iemAImpl_not_u8_locked,
775 iemAImpl_not_u16, iemAImpl_not_u16_locked,
776 iemAImpl_not_u32, iemAImpl_not_u32_locked,
777 iemAImpl_not_u64, iemAImpl_not_u64_locked
778};
779
780
781/** Function table for the ROL instruction. */
782IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
783{
784 iemAImpl_rol_u8,
785 iemAImpl_rol_u16,
786 iemAImpl_rol_u32,
787 iemAImpl_rol_u64
788};
789
790/** Function table for the ROR instruction. */
791IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
792{
793 iemAImpl_ror_u8,
794 iemAImpl_ror_u16,
795 iemAImpl_ror_u32,
796 iemAImpl_ror_u64
797};
798
799/** Function table for the RCL instruction. */
800IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
801{
802 iemAImpl_rcl_u8,
803 iemAImpl_rcl_u16,
804 iemAImpl_rcl_u32,
805 iemAImpl_rcl_u64
806};
807
808/** Function table for the RCR instruction. */
809IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
810{
811 iemAImpl_rcr_u8,
812 iemAImpl_rcr_u16,
813 iemAImpl_rcr_u32,
814 iemAImpl_rcr_u64
815};
816
817/** Function table for the SHL instruction. */
818IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
819{
820 iemAImpl_shl_u8,
821 iemAImpl_shl_u16,
822 iemAImpl_shl_u32,
823 iemAImpl_shl_u64
824};
825
826/** Function table for the SHR instruction. */
827IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
828{
829 iemAImpl_shr_u8,
830 iemAImpl_shr_u16,
831 iemAImpl_shr_u32,
832 iemAImpl_shr_u64
833};
834
835/** Function table for the SAR instruction. */
836IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
837{
838 iemAImpl_sar_u8,
839 iemAImpl_sar_u16,
840 iemAImpl_sar_u32,
841 iemAImpl_sar_u64
842};
843
844
845/** Function table for the MUL instruction. */
846IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
847{
848 iemAImpl_mul_u8,
849 iemAImpl_mul_u16,
850 iemAImpl_mul_u32,
851 iemAImpl_mul_u64
852};
853
854/** Function table for the IMUL instruction working implicitly on rAX. */
855IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
856{
857 iemAImpl_imul_u8,
858 iemAImpl_imul_u16,
859 iemAImpl_imul_u32,
860 iemAImpl_imul_u64
861};
862
863/** Function table for the DIV instruction. */
864IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
865{
866 iemAImpl_div_u8,
867 iemAImpl_div_u16,
868 iemAImpl_div_u32,
869 iemAImpl_div_u64
870};
871
872/** Function table for the IDIV instruction. */
873IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
874{
875 iemAImpl_idiv_u8,
876 iemAImpl_idiv_u16,
877 iemAImpl_idiv_u32,
878 iemAImpl_idiv_u64
879};
880
881/** Function table for the SHLD instruction */
882IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
883{
884 iemAImpl_shld_u16,
885 iemAImpl_shld_u32,
886 iemAImpl_shld_u64,
887};
888
889/** Function table for the SHRD instruction */
890IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
891{
892 iemAImpl_shrd_u16,
893 iemAImpl_shrd_u32,
894 iemAImpl_shrd_u64,
895};
896
897
898/** Function table for the PUNPCKLBW instruction */
899IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
900/** Function table for the PUNPCKLWD instruction */
901IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
902/** Function table for the PUNPCKLDQ instruction */
903IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
904/** Function table for the PUNPCKLQDQ instruction */
905IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
906
907/** Function table for the PUNPCKHBW instruction */
908IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
909/** Function table for the PUNPCKHWD instruction */
910IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
911/** Function table for the PUNPCKHDQ instruction */
912IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
913/** Function table for the PUNPCKHQDQ instruction */
914IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
915
916/** Function table for the PXOR instruction */
917IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
918/** Function table for the PCMPEQB instruction */
919IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
920/** Function table for the PCMPEQW instruction */
921IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
922/** Function table for the PCMPEQD instruction */
923IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
924
925
926#if defined(IEM_LOG_MEMORY_WRITES)
927/** What IEM just wrote. */
928uint8_t g_abIemWrote[256];
929/** How much IEM just wrote. */
930size_t g_cbIemWrote;
931#endif
932
933
934/*********************************************************************************************************************************
935* Internal Functions *
936*********************************************************************************************************************************/
937IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr);
938IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu);
939IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPU pVCpu);
940IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel);
941/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
943IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
944IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel);
945IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr);
946IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr);
947IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPU pVCpu);
948IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL uSel);
949IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
950IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel);
951IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
952IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
953IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPU pVCpu);
954#ifdef IEM_WITH_SETJMP
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
956DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu);
957DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
958DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel);
959DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess);
960#endif
961
962IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
963IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess);
964IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
968IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
969IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
970IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
971IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
972IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp);
973IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
974IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value);
975IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value);
976IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel);
977IEM_STATIC uint16_t iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg);
978IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg);
979
980#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
981IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPU pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
982IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPU pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
983IEM_STATIC VBOXSTRICTRC iemVmxVmexitTripleFault(PVMCPU pVCpu);
984IEM_STATIC VBOXSTRICTRC iemVmxVmexitPreemptTimer(PVMCPU pVCpu);
985IEM_STATIC VBOXSTRICTRC iemVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending);
986IEM_STATIC VBOXSTRICTRC iemVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector);
987IEM_STATIC VBOXSTRICTRC iemVmxVmexitInitIpi(PVMCPU pVCpu);
988IEM_STATIC VBOXSTRICTRC iemVmxVmexitIntWindow(PVMCPU pVCpu);
989IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPU pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
990IEM_STATIC VBOXSTRICTRC iemVmxVmexitApicAccess(PVMCPU pVCpu, uint16_t offAccess, uint32_t fAccess);
991#endif
992
993#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
994IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
995IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPU pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
996#endif
997
998
999/**
1000 * Sets the pass up status.
1001 *
1002 * @returns VINF_SUCCESS.
1003 * @param pVCpu The cross context virtual CPU structure of the
1004 * calling thread.
1005 * @param rcPassUp The pass up status. Must be informational.
1006 * VINF_SUCCESS is not allowed.
1007 */
1008IEM_STATIC int iemSetPassUpStatus(PVMCPU pVCpu, VBOXSTRICTRC rcPassUp)
1009{
1010 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1011
1012 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1013 if (rcOldPassUp == VINF_SUCCESS)
1014 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1015 /* If both are EM scheduling codes, use EM priority rules. */
1016 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1017 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1018 {
1019 if (rcPassUp < rcOldPassUp)
1020 {
1021 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1022 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1023 }
1024 else
1025 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1026 }
1027 /* Override EM scheduling with specific status code. */
1028 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1029 {
1030 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1031 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1032 }
1033 /* Don't override specific status code, first come first served. */
1034 else
1035 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1036 return VINF_SUCCESS;
1037}
1038
1039
1040/**
1041 * Calculates the CPU mode.
1042 *
1043 * This is mainly for updating IEMCPU::enmCpuMode.
1044 *
1045 * @returns CPU mode.
1046 * @param pVCpu The cross context virtual CPU structure of the
1047 * calling thread.
1048 */
1049DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPU pVCpu)
1050{
1051 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1052 return IEMMODE_64BIT;
1053 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1054 return IEMMODE_32BIT;
1055 return IEMMODE_16BIT;
1056}
1057
1058
1059/**
1060 * Initializes the execution state.
1061 *
1062 * @param pVCpu The cross context virtual CPU structure of the
1063 * calling thread.
1064 * @param fBypassHandlers Whether to bypass access handlers.
1065 *
1066 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1067 * side-effects in strict builds.
1068 */
1069DECLINLINE(void) iemInitExec(PVMCPU pVCpu, bool fBypassHandlers)
1070{
1071 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1072 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1073
1074#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1075 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1076 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1077 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1078 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1079 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1081 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1082 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1083#endif
1084
1085#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1086 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1087#endif
1088 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1089 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1090#ifdef VBOX_STRICT
1091 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1092 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1093 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1094 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1095 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1096 pVCpu->iem.s.uRexReg = 127;
1097 pVCpu->iem.s.uRexB = 127;
1098 pVCpu->iem.s.offModRm = 127;
1099 pVCpu->iem.s.uRexIndex = 127;
1100 pVCpu->iem.s.iEffSeg = 127;
1101 pVCpu->iem.s.idxPrefix = 127;
1102 pVCpu->iem.s.uVex3rdReg = 127;
1103 pVCpu->iem.s.uVexLength = 127;
1104 pVCpu->iem.s.fEvexStuff = 127;
1105 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1106# ifdef IEM_WITH_CODE_TLB
1107 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1108 pVCpu->iem.s.pbInstrBuf = NULL;
1109 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1110 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1111 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1112 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1113# else
1114 pVCpu->iem.s.offOpcode = 127;
1115 pVCpu->iem.s.cbOpcode = 127;
1116# endif
1117#endif
1118
1119 pVCpu->iem.s.cActiveMappings = 0;
1120 pVCpu->iem.s.iNextMapping = 0;
1121 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1122 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1123#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1124 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1125 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1126 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1127 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1128 if (!pVCpu->iem.s.fInPatchCode)
1129 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1130#endif
1131}
1132
1133#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1134/**
1135 * Performs a minimal reinitialization of the execution state.
1136 *
1137 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1138 * 'world-switch' type operations on the CPU. Currently only nested
1139 * hardware-virtualization uses it.
1140 *
1141 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1142 */
1143IEM_STATIC void iemReInitExec(PVMCPU pVCpu)
1144{
1145 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1146 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1147
1148 pVCpu->iem.s.uCpl = uCpl;
1149 pVCpu->iem.s.enmCpuMode = enmMode;
1150 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1151 pVCpu->iem.s.enmEffAddrMode = enmMode;
1152 if (enmMode != IEMMODE_64BIT)
1153 {
1154 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1155 pVCpu->iem.s.enmEffOpSize = enmMode;
1156 }
1157 else
1158 {
1159 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1160 pVCpu->iem.s.enmEffOpSize = enmMode;
1161 }
1162 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1163#ifndef IEM_WITH_CODE_TLB
1164 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1165 pVCpu->iem.s.offOpcode = 0;
1166 pVCpu->iem.s.cbOpcode = 0;
1167#endif
1168 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1169}
1170#endif
1171
1172/**
1173 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1174 *
1175 * @param pVCpu The cross context virtual CPU structure of the
1176 * calling thread.
1177 */
1178DECLINLINE(void) iemUninitExec(PVMCPU pVCpu)
1179{
1180 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1181#ifdef VBOX_STRICT
1182# ifdef IEM_WITH_CODE_TLB
1183 NOREF(pVCpu);
1184# else
1185 pVCpu->iem.s.cbOpcode = 0;
1186# endif
1187#else
1188 NOREF(pVCpu);
1189#endif
1190}
1191
1192
1193/**
1194 * Initializes the decoder state.
1195 *
1196 * iemReInitDecoder is mostly a copy of this function.
1197 *
1198 * @param pVCpu The cross context virtual CPU structure of the
1199 * calling thread.
1200 * @param fBypassHandlers Whether to bypass access handlers.
1201 */
1202DECLINLINE(void) iemInitDecoder(PVMCPU pVCpu, bool fBypassHandlers)
1203{
1204 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1205 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1206
1207#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1209 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1210 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1211 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1212 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1213 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1214 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1215 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1216#endif
1217
1218#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1219 CPUMGuestLazyLoadHiddenCsAndSs(pVCpu);
1220#endif
1221 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1222 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1223 pVCpu->iem.s.enmCpuMode = enmMode;
1224 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1225 pVCpu->iem.s.enmEffAddrMode = enmMode;
1226 if (enmMode != IEMMODE_64BIT)
1227 {
1228 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1229 pVCpu->iem.s.enmEffOpSize = enmMode;
1230 }
1231 else
1232 {
1233 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1234 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1235 }
1236 pVCpu->iem.s.fPrefixes = 0;
1237 pVCpu->iem.s.uRexReg = 0;
1238 pVCpu->iem.s.uRexB = 0;
1239 pVCpu->iem.s.uRexIndex = 0;
1240 pVCpu->iem.s.idxPrefix = 0;
1241 pVCpu->iem.s.uVex3rdReg = 0;
1242 pVCpu->iem.s.uVexLength = 0;
1243 pVCpu->iem.s.fEvexStuff = 0;
1244 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1245#ifdef IEM_WITH_CODE_TLB
1246 pVCpu->iem.s.pbInstrBuf = NULL;
1247 pVCpu->iem.s.offInstrNextByte = 0;
1248 pVCpu->iem.s.offCurInstrStart = 0;
1249# ifdef VBOX_STRICT
1250 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1251 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1252 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1253# endif
1254#else
1255 pVCpu->iem.s.offOpcode = 0;
1256 pVCpu->iem.s.cbOpcode = 0;
1257#endif
1258 pVCpu->iem.s.offModRm = 0;
1259 pVCpu->iem.s.cActiveMappings = 0;
1260 pVCpu->iem.s.iNextMapping = 0;
1261 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1262 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1263#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1264 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1265 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1266 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1267 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1268 if (!pVCpu->iem.s.fInPatchCode)
1269 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1270#endif
1271
1272#ifdef DBGFTRACE_ENABLED
1273 switch (enmMode)
1274 {
1275 case IEMMODE_64BIT:
1276 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1277 break;
1278 case IEMMODE_32BIT:
1279 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1280 break;
1281 case IEMMODE_16BIT:
1282 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1283 break;
1284 }
1285#endif
1286}
1287
1288
1289/**
1290 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1291 *
1292 * This is mostly a copy of iemInitDecoder.
1293 *
1294 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1295 */
1296DECLINLINE(void) iemReInitDecoder(PVMCPU pVCpu)
1297{
1298 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1299
1300#if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0)
1301 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1302 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1303 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1305 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1306 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1307 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1308 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1309#endif
1310
1311 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1312 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1313 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1314 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1315 pVCpu->iem.s.enmEffAddrMode = enmMode;
1316 if (enmMode != IEMMODE_64BIT)
1317 {
1318 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1319 pVCpu->iem.s.enmEffOpSize = enmMode;
1320 }
1321 else
1322 {
1323 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1324 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1325 }
1326 pVCpu->iem.s.fPrefixes = 0;
1327 pVCpu->iem.s.uRexReg = 0;
1328 pVCpu->iem.s.uRexB = 0;
1329 pVCpu->iem.s.uRexIndex = 0;
1330 pVCpu->iem.s.idxPrefix = 0;
1331 pVCpu->iem.s.uVex3rdReg = 0;
1332 pVCpu->iem.s.uVexLength = 0;
1333 pVCpu->iem.s.fEvexStuff = 0;
1334 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1335#ifdef IEM_WITH_CODE_TLB
1336 if (pVCpu->iem.s.pbInstrBuf)
1337 {
1338 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1339 - pVCpu->iem.s.uInstrBufPc;
1340 if (off < pVCpu->iem.s.cbInstrBufTotal)
1341 {
1342 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1343 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1344 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1345 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1346 else
1347 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1348 }
1349 else
1350 {
1351 pVCpu->iem.s.pbInstrBuf = NULL;
1352 pVCpu->iem.s.offInstrNextByte = 0;
1353 pVCpu->iem.s.offCurInstrStart = 0;
1354 pVCpu->iem.s.cbInstrBuf = 0;
1355 pVCpu->iem.s.cbInstrBufTotal = 0;
1356 }
1357 }
1358 else
1359 {
1360 pVCpu->iem.s.offInstrNextByte = 0;
1361 pVCpu->iem.s.offCurInstrStart = 0;
1362 pVCpu->iem.s.cbInstrBuf = 0;
1363 pVCpu->iem.s.cbInstrBufTotal = 0;
1364 }
1365#else
1366 pVCpu->iem.s.cbOpcode = 0;
1367 pVCpu->iem.s.offOpcode = 0;
1368#endif
1369 pVCpu->iem.s.offModRm = 0;
1370 Assert(pVCpu->iem.s.cActiveMappings == 0);
1371 pVCpu->iem.s.iNextMapping = 0;
1372 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1373 Assert(pVCpu->iem.s.fBypassHandlers == false);
1374#ifdef VBOX_WITH_RAW_MODE_NOT_R0
1375 if (!pVCpu->iem.s.fInPatchCode)
1376 { /* likely */ }
1377 else
1378 {
1379 pVCpu->iem.s.fInPatchCode = pVCpu->iem.s.uCpl == 0
1380 && pVCpu->cpum.GstCtx.cs.u64Base == 0
1381 && pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX
1382 && PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip);
1383 if (!pVCpu->iem.s.fInPatchCode)
1384 CPUMRawLeave(pVCpu, VINF_SUCCESS);
1385 }
1386#endif
1387
1388#ifdef DBGFTRACE_ENABLED
1389 switch (enmMode)
1390 {
1391 case IEMMODE_64BIT:
1392 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1393 break;
1394 case IEMMODE_32BIT:
1395 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1396 break;
1397 case IEMMODE_16BIT:
1398 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1399 break;
1400 }
1401#endif
1402}
1403
1404
1405
1406/**
1407 * Prefetches opcodes the first time, when starting execution.
1408 *
1409 * @returns Strict VBox status code.
1410 * @param pVCpu The cross context virtual CPU structure of the
1411 * calling thread.
1412 * @param fBypassHandlers Whether to bypass access handlers.
1413 */
1414IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers)
1415{
1416 iemInitDecoder(pVCpu, fBypassHandlers);
1417
1418#ifdef IEM_WITH_CODE_TLB
1419 /** @todo Do ITLB lookup here. */
1420
1421#else /* !IEM_WITH_CODE_TLB */
1422
1423 /*
1424 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1425 *
1426 * First translate CS:rIP to a physical address.
1427 */
1428 uint32_t cbToTryRead;
1429 RTGCPTR GCPtrPC;
1430 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1431 {
1432 cbToTryRead = PAGE_SIZE;
1433 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1434 if (IEM_IS_CANONICAL(GCPtrPC))
1435 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1436 else
1437 return iemRaiseGeneralProtectionFault0(pVCpu);
1438 }
1439 else
1440 {
1441 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1442 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1443 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1444 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1445 else
1446 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1447 if (cbToTryRead) { /* likely */ }
1448 else /* overflowed */
1449 {
1450 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1451 cbToTryRead = UINT32_MAX;
1452 }
1453 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1454 Assert(GCPtrPC <= UINT32_MAX);
1455 }
1456
1457# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1458 /* Allow interpretation of patch manager code blocks since they can for
1459 instance throw #PFs for perfectly good reasons. */
1460 if (pVCpu->iem.s.fInPatchCode)
1461 {
1462 size_t cbRead = 0;
1463 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbRead);
1464 AssertRCReturn(rc, rc);
1465 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
1466 return VINF_SUCCESS;
1467 }
1468# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1469
1470 RTGCPHYS GCPhys;
1471 uint64_t fFlags;
1472 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1473 if (RT_SUCCESS(rc)) { /* probable */ }
1474 else
1475 {
1476 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1477 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1478 }
1479 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1480 else
1481 {
1482 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1483 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1484 }
1485 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1486 else
1487 {
1488 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1489 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1490 }
1491 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1492 /** @todo Check reserved bits and such stuff. PGM is better at doing
1493 * that, so do it when implementing the guest virtual address
1494 * TLB... */
1495
1496 /*
1497 * Read the bytes at this address.
1498 */
1499 PVM pVM = pVCpu->CTX_SUFF(pVM);
1500# if defined(IN_RING3) && defined(VBOX_WITH_RAW_MODE_NOT_R0)
1501 size_t cbActual;
1502 if ( PATMIsEnabled(pVM)
1503 && RT_SUCCESS(PATMR3ReadOrgInstr(pVM, GCPtrPC, pVCpu->iem.s.abOpcode, sizeof(pVCpu->iem.s.abOpcode), &cbActual)))
1504 {
1505 Log4(("decode - Read %u unpatched bytes at %RGv\n", cbActual, GCPtrPC));
1506 Assert(cbActual > 0);
1507 pVCpu->iem.s.cbOpcode = (uint8_t)cbActual;
1508 }
1509 else
1510# endif
1511 {
1512 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1513 if (cbToTryRead > cbLeftOnPage)
1514 cbToTryRead = cbLeftOnPage;
1515 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1516 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1517
1518 if (!pVCpu->iem.s.fBypassHandlers)
1519 {
1520 VBOXSTRICTRC rcStrict = PGMPhysRead(pVM, GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1521 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1522 { /* likely */ }
1523 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1524 {
1525 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1526 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1527 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1528 }
1529 else
1530 {
1531 Log((RT_SUCCESS(rcStrict)
1532 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1533 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1534 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1535 return rcStrict;
1536 }
1537 }
1538 else
1539 {
1540 rc = PGMPhysSimpleReadGCPhys(pVM, pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1541 if (RT_SUCCESS(rc))
1542 { /* likely */ }
1543 else
1544 {
1545 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1546 GCPtrPC, GCPhys, cbToTryRead, rc));
1547 return rc;
1548 }
1549 }
1550 pVCpu->iem.s.cbOpcode = cbToTryRead;
1551 }
1552#endif /* !IEM_WITH_CODE_TLB */
1553 return VINF_SUCCESS;
1554}
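/*
 * Illustrative example (hypothetical values, not taken from any guest): with
 * CS.base=0x00010000, EIP=0x00001234 and CS.limit=0x0000ffff, the address
 * calculation above yields GCPtrPC32=0x1234, cbToTryRead=0xffff-0x1234+1=0xedcc
 * and GCPtrPC=0x00011234.  The read code then clamps cbToTryRead first to the
 * bytes left on the page (0x1000 - 0x234 = 0xdcc) and then to
 * sizeof(pVCpu->iem.s.abOpcode), so at most one page's / one buffer's worth of
 * opcode bytes is prefetched per call.
 */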
1555
1556
1557/**
1558 * Invalidates the IEM TLBs.
1559 *
1560 * This is called internally as well as by PGM when moving GC mappings.
1561 *
1563 * @param pVCpu The cross context virtual CPU structure of the calling
1564 * thread.
1565 * @param fVmm Set when PGM calls us with a remapping.
1566 */
1567VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPU pVCpu, bool fVmm)
1568{
1569#ifdef IEM_WITH_CODE_TLB
1570 pVCpu->iem.s.cbInstrBufTotal = 0;
1571 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1572 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1573 { /* very likely */ }
1574 else
1575 {
1576 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1577 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1578 while (i-- > 0)
1579 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1580 }
1581#endif
1582
1583#ifdef IEM_WITH_DATA_TLB
1584 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1585 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1586 { /* very likely */ }
1587 else
1588 {
1589 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1590 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1591 while (i-- > 0)
1592 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1593 }
1594#endif
1595 NOREF(pVCpu); NOREF(fVmm);
1596}
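/*
 * Editorial note (a sketch of the mechanism, not authoritative): TLB tags are
 * stored with the current revision OR'ed in (cf. the uTag calculation in
 * iemOpcodeFetchBytesJmp), so bumping uTlbRevision above makes every existing
 * entry stop matching without touching the array.  Only on the rare wrap-around
 * to zero do the tags have to be cleared explicitly, which is what the slow
 * path handles.
 */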
1597
1598
1599/**
1600 * Invalidates a page in the TLBs.
1601 *
1602 * @param pVCpu The cross context virtual CPU structure of the calling
1603 * thread.
1604 * @param GCPtr The address of the page to invalidate
1605 */
1606VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPU pVCpu, RTGCPTR GCPtr)
1607{
1608#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1609 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1610 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1611 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1612 uintptr_t idx = (uint8_t)GCPtr;
1613
1614# ifdef IEM_WITH_CODE_TLB
1615 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1616 {
1617 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1618 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1619 pVCpu->iem.s.cbInstrBufTotal = 0;
1620 }
1621# endif
1622
1623# ifdef IEM_WITH_DATA_TLB
1624 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1625 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1626# endif
1627#else
1628 NOREF(pVCpu); NOREF(GCPtr);
1629#endif
1630}
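/*
 * Editorial note: both TLBs are direct-mapped, 256-entry tables, so the lookup
 * above amounts to the following sketch (using only what the code above
 * already uses):
 *
 *     uintptr_t const idx  = (uint8_t)(GCPtr >> X86_PAGE_SHIFT);
 *     bool      const fHit = pVCpu->iem.s.DataTlb.aEntries[idx].uTag
 *                         == ((GCPtr >> X86_PAGE_SHIFT) | pVCpu->iem.s.DataTlb.uTlbRevision);
 */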
1631
1632
1633/**
1634 * Invalidates the host physical aspects of the IEM TLBs.
1635 *
1636 * This is called internally as well as by PGM when moving GC mappings.
1637 *
1638 * @param pVCpu The cross context virtual CPU structure of the calling
1639 * thread.
1640 */
1641VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPU pVCpu)
1642{
1643#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1644 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1645
1646# ifdef IEM_WITH_CODE_TLB
1647 pVCpu->iem.s.cbInstrBufTotal = 0;
1648# endif
1649 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1650 if (uTlbPhysRev != 0)
1651 {
1652 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1653 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1654 }
1655 else
1656 {
1657 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1658 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1659
1660 unsigned i;
1661# ifdef IEM_WITH_CODE_TLB
1662 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1663 while (i-- > 0)
1664 {
1665 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1666 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1667 }
1668# endif
1669# ifdef IEM_WITH_DATA_TLB
1670 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1671 while (i-- > 0)
1672 {
1673 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1674 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1675 }
1676# endif
1677 }
1678#else
1679 NOREF(pVCpu);
1680#endif
1681}
1682
1683
1684/**
1685 * Invalidates the host physical aspects of the IEM TLBs on all VCPUs.
1686 *
1687 * This is called internally as well as by PGM when moving GC mappings.
1688 *
1689 * @param pVM The cross context VM structure.
1690 *
1691 * @remarks Caller holds the PGM lock.
1692 */
1693VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1694{
1695 RT_NOREF_PV(pVM);
1696}
1697
1698#ifdef IEM_WITH_CODE_TLB
1699
1700/**
1701 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1702 * longjmping on failure.
1703 *
1704 * We end up here for a number of reasons:
1705 * - pbInstrBuf isn't yet initialized.
1706 * - Advancing beyond the buffer boundary (e.g. crossing a page).
1707 * - Advancing beyond the CS segment limit.
1708 * - Fetching from non-mappable page (e.g. MMIO).
1709 *
1710 * @param pVCpu The cross context virtual CPU structure of the
1711 * calling thread.
1712 * @param pvDst Where to return the bytes.
1713 * @param cbDst Number of bytes to read.
1714 *
1715 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1716 */
1717IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPU pVCpu, size_t cbDst, void *pvDst)
1718{
1719#ifdef IN_RING3
1720 for (;;)
1721 {
1722 Assert(cbDst <= 8);
1723 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1724
1725 /*
1726 * We might have a partial buffer match, deal with that first to make the
1727 * rest simpler. This is the first part of the cross page/buffer case.
1728 */
1729 if (pVCpu->iem.s.pbInstrBuf != NULL)
1730 {
1731 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1732 {
1733 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1734 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1735 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1736
1737 cbDst -= cbCopy;
1738 pvDst = (uint8_t *)pvDst + cbCopy;
1739 offBuf += cbCopy;
1740 pVCpu->iem.s.offInstrNextByte += offBuf;
1741 }
1742 }
1743
1744 /*
1745 * Check segment limit, figuring how much we're allowed to access at this point.
1746 *
1747 * We will fault immediately if RIP is past the segment limit / in non-canonical
1748 * territory. If we do continue, there are one or more bytes to read before we
1749 * end up in trouble and we need to do that first before faulting.
1750 */
1751 RTGCPTR GCPtrFirst;
1752 uint32_t cbMaxRead;
1753 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1754 {
1755 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1756 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1757 { /* likely */ }
1758 else
1759 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1760 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1761 }
1762 else
1763 {
1764 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1765 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1766 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1767 { /* likely */ }
1768 else
1769 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1770 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1771 if (cbMaxRead != 0)
1772 { /* likely */ }
1773 else
1774 {
1775 /* Overflowed because address is 0 and limit is max. */
1776 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1777 cbMaxRead = X86_PAGE_SIZE;
1778 }
1779 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1780 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1781 if (cbMaxRead2 < cbMaxRead)
1782 cbMaxRead = cbMaxRead2;
1783 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1784 }
1785
1786 /*
1787 * Get the TLB entry for this piece of code.
1788 */
1789 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1790 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1791 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1792 if (pTlbe->uTag == uTag)
1793 {
1794 /* likely when executing lots of code, otherwise unlikely */
1795# ifdef VBOX_WITH_STATISTICS
1796 pVCpu->iem.s.CodeTlb.cTlbHits++;
1797# endif
1798 }
1799 else
1800 {
1801 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1802# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1803 if (PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip))
1804 {
1805 pTlbe->uTag = uTag;
1806 pTlbe->fFlagsAndPhysRev = IEMTLBE_F_PATCH_CODE | IEMTLBE_F_PT_NO_WRITE | IEMTLBE_F_PT_NO_USER
1807 | IEMTLBE_F_PT_NO_DIRTY | IEMTLBE_F_NO_MAPPINGR3;
1808 pTlbe->GCPhys = NIL_RTGCPHYS;
1809 pTlbe->pbMappingR3 = NULL;
1810 }
1811 else
1812# endif
1813 {
1814 RTGCPHYS GCPhys;
1815 uint64_t fFlags;
1816 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1817 if (RT_FAILURE(rc))
1818 {
1819 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1820 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1821 }
1822
1823 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1824 pTlbe->uTag = uTag;
1825 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1826 pTlbe->GCPhys = GCPhys;
1827 pTlbe->pbMappingR3 = NULL;
1828 }
1829 }
1830
1831 /*
1832 * Check TLB page table level access flags.
1833 */
1834 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1835 {
1836 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1837 {
1838 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1839 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1840 }
1841 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1842 {
1843 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1844 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1845 }
1846 }
1847
1848# ifdef VBOX_WITH_RAW_MODE_NOT_R0
1849 /*
1850 * Allow interpretation of patch manager code blocks since they can for
1851 * instance throw #PFs for perfectly good reasons.
1852 */
1853 if (!(pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PATCH_CODE))
1854 { /* likely */ }
1855 else
1856 {
1857 /** @todo This could be optimized a little in ring-3 if we liked. */
1858 size_t cbRead = 0;
1859 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrFirst, pvDst, cbDst, &cbRead);
1860 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1861 AssertStmt(cbRead == cbDst, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_IEM_IPE_1));
1862 return;
1863 }
1864# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
1865
1866 /*
1867 * Look up the physical page info if necessary.
1868 */
1869 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1870 { /* not necessary */ }
1871 else
1872 {
1873 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1874 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1875 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1876 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1877 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1878 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1879 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1880 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1881 }
1882
1883# if defined(IN_RING3) || (defined(IN_RING0) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE))
1884 /*
1885 * Try to do a direct read using the pbMappingR3 pointer.
1886 */
1887 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1888 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1889 {
1890 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1891 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1892 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1893 {
1894 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1895 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1896 }
1897 else
1898 {
1899 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1900 Assert(cbInstr < cbMaxRead);
1901 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1902 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1903 }
1904 if (cbDst <= cbMaxRead)
1905 {
1906 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1907 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1908 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1909 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1910 return;
1911 }
1912 pVCpu->iem.s.pbInstrBuf = NULL;
1913
1914 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1915 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1916 }
1917 else
1918# endif
1919#if 0
1920 /*
1921 * If there is no special read handling, we can read a bit more and
1922 * put it in the prefetch buffer.
1923 */
1924 if ( cbDst < cbMaxRead
1925 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1926 {
1927 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1928 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1929 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1930 { /* likely */ }
1931 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1932 {
1933 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1934 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1935 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1936 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1937 }
1938 else
1939 {
1940 Log((RT_SUCCESS(rcStrict)
1941 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1942 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1943 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1944 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1945 }
1946 }
1947 /*
1948 * Special read handling, so only read exactly what's needed.
1949 * This is a highly unlikely scenario.
1950 */
1951 else
1952#endif
1953 {
1954 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1955 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1956 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1957 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1958 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1959 { /* likely */ }
1960 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1961 {
1962 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1963 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1964 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1965 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1966 }
1967 else
1968 {
1969 Log((RT_SUCCESS(rcStrict)
1970 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1971 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1972 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1973 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1974 }
1975 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1976 if (cbToRead == cbDst)
1977 return;
1978 }
1979
1980 /*
1981 * More to read, loop.
1982 */
1983 cbDst -= cbMaxRead;
1984 pvDst = (uint8_t *)pvDst + cbMaxRead;
1985 }
1986#else
1987 RT_NOREF(pvDst, cbDst);
1988 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1989#endif
1990}
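/*
 * Editorial summary of the loop above: each iteration copies at most cbMaxRead
 * bytes, where cbMaxRead is bounded by both the bytes remaining on the current
 * guest page and the CS segment limit.  When cbDst is not fully satisfied (an
 * instruction crossing a page boundary), cbDst and pvDst are advanced and the
 * next iteration translates and reads from the following page.
 */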
1991
1992#else
1993
1994/**
1995 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1996 * exception if it fails.
1997 *
1998 * @returns Strict VBox status code.
1999 * @param pVCpu The cross context virtual CPU structure of the
2000 * calling thread.
2001 * @param cbMin The minimum number of bytes relative to offOpcode
2002 * that must be read.
2003 */
2004IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPU pVCpu, size_t cbMin)
2005{
2006 /*
2007 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
2008 *
2009 * First translate CS:rIP to a physical address.
2010 */
2011 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
2012 uint32_t cbToTryRead;
2013 RTGCPTR GCPtrNext;
2014 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
2015 {
2016 cbToTryRead = PAGE_SIZE;
2017 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
2018 if (!IEM_IS_CANONICAL(GCPtrNext))
2019 return iemRaiseGeneralProtectionFault0(pVCpu);
2020 }
2021 else
2022 {
2023 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
2024 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
2025 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
2026 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
2027 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2028 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
2029 if (!cbToTryRead) /* overflowed */
2030 {
2031 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
2032 cbToTryRead = UINT32_MAX;
2033 /** @todo check out wrapping around the code segment. */
2034 }
2035 if (cbToTryRead < cbMin - cbLeft)
2036 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
2037 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
2038 }
2039
2040 /* Only read up to the end of the page, and make sure we don't read more
2041 than the opcode buffer can hold. */
2042 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
2043 if (cbToTryRead > cbLeftOnPage)
2044 cbToTryRead = cbLeftOnPage;
2045 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
2046 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
2047/** @todo r=bird: Convert assertion into undefined opcode exception? */
2048 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
2049
2050# ifdef VBOX_WITH_RAW_MODE_NOT_R0
2051 /* Allow interpretation of patch manager code blocks since they can for
2052 instance throw #PFs for perfectly good reasons. */
2053 if (pVCpu->iem.s.fInPatchCode)
2054 {
2055 size_t cbRead = 0;
2056 int rc = PATMReadPatchCode(pVCpu->CTX_SUFF(pVM), GCPtrNext, pVCpu->iem.s.abOpcode, cbToTryRead, &cbRead);
2057 AssertRCReturn(rc, rc);
2058 pVCpu->iem.s.cbOpcode = (uint8_t)cbRead; Assert(pVCpu->iem.s.cbOpcode == cbRead); Assert(cbRead > 0);
2059 return VINF_SUCCESS;
2060 }
2061# endif /* VBOX_WITH_RAW_MODE_NOT_R0 */
2062
2063 RTGCPHYS GCPhys;
2064 uint64_t fFlags;
2065 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
2066 if (RT_FAILURE(rc))
2067 {
2068 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
2069 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
2070 }
2071 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
2072 {
2073 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
2074 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2075 }
2076 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
2077 {
2078 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
2079 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
2080 }
2081 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
2082 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
2083 /** @todo Check reserved bits and such stuff. PGM is better at doing
2084 * that, so do it when implementing the guest virtual address
2085 * TLB... */
2086
2087 /*
2088 * Read the bytes at this address.
2089 *
2090 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2091 * and since PATM should only patch the start of an instruction there
2092 * should be no need to check again here.
2093 */
2094 if (!pVCpu->iem.s.fBypassHandlers)
2095 {
2096 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2097 cbToTryRead, PGMACCESSORIGIN_IEM);
2098 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2099 { /* likely */ }
2100 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2101 {
2102 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2103 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2104 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2105 }
2106 else
2107 {
2108 Log((RT_SUCCESS(rcStrict)
2109 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2110 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2111 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2112 return rcStrict;
2113 }
2114 }
2115 else
2116 {
2117 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2118 if (RT_SUCCESS(rc))
2119 { /* likely */ }
2120 else
2121 {
2122 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2123 return rc;
2124 }
2125 }
2126 pVCpu->iem.s.cbOpcode += cbToTryRead;
2127 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2128
2129 return VINF_SUCCESS;
2130}
2131
2132#endif /* !IEM_WITH_CODE_TLB */
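/*
 * Editorial note on the build variants above and below: with IEM_WITH_CODE_TLB
 * the decoder reads through pbInstrBuf and the code TLB, and failures are
 * reported by longjmp from the *Jmp fetchers; without it, opcode bytes are
 * accumulated in abOpcode by iemOpcodeFetchMoreBytes.  Independently of that,
 * IEM_WITH_SETJMP selects the longjmp-based fetch API instead of the
 * VBOXSTRICTRC-returning one, which is why most fetch helpers below come in
 * two flavours.
 */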
2133#ifndef IEM_WITH_SETJMP
2134
2135/**
2136 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2137 *
2138 * @returns Strict VBox status code.
2139 * @param pVCpu The cross context virtual CPU structure of the
2140 * calling thread.
2141 * @param pb Where to return the opcode byte.
2142 */
2143DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPU pVCpu, uint8_t *pb)
2144{
2145 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2146 if (rcStrict == VINF_SUCCESS)
2147 {
2148 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2149 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2150 pVCpu->iem.s.offOpcode = offOpcode + 1;
2151 }
2152 else
2153 *pb = 0;
2154 return rcStrict;
2155}
2156
2157
2158/**
2159 * Fetches the next opcode byte.
2160 *
2161 * @returns Strict VBox status code.
2162 * @param pVCpu The cross context virtual CPU structure of the
2163 * calling thread.
2164 * @param pu8 Where to return the opcode byte.
2165 */
2166DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPU pVCpu, uint8_t *pu8)
2167{
2168 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2169 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2170 {
2171 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2172 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2173 return VINF_SUCCESS;
2174 }
2175 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2176}
2177
2178#else /* IEM_WITH_SETJMP */
2179
2180/**
2181 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2182 *
2183 * @returns The opcode byte.
2184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2185 */
2186DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPU pVCpu)
2187{
2188# ifdef IEM_WITH_CODE_TLB
2189 uint8_t u8;
2190 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2191 return u8;
2192# else
2193 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2194 if (rcStrict == VINF_SUCCESS)
2195 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2196 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2197# endif
2198}
2199
2200
2201/**
2202 * Fetches the next opcode byte, longjmp on error.
2203 *
2204 * @returns The opcode byte.
2205 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2206 */
2207DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPU pVCpu)
2208{
2209# ifdef IEM_WITH_CODE_TLB
2210 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2211 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2212 if (RT_LIKELY( pbBuf != NULL
2213 && offBuf < pVCpu->iem.s.cbInstrBuf))
2214 {
2215 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2216 return pbBuf[offBuf];
2217 }
2218# else
2219 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2220 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2221 {
2222 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2223 return pVCpu->iem.s.abOpcode[offOpcode];
2224 }
2225# endif
2226 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2227}
2228
2229#endif /* IEM_WITH_SETJMP */
2230
2231/**
2232 * Fetches the next opcode byte, returns automatically on failure.
2233 *
2234 * @param a_pu8 Where to return the opcode byte.
2235 * @remark Implicitly references pVCpu.
2236 */
2237#ifndef IEM_WITH_SETJMP
2238# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2239 do \
2240 { \
2241 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2242 if (rcStrict2 == VINF_SUCCESS) \
2243 { /* likely */ } \
2244 else \
2245 return rcStrict2; \
2246 } while (0)
2247#else
2248# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2249#endif /* IEM_WITH_SETJMP */
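/*
 * Illustrative usage sketch (hypothetical handler, not an actual IEM opcode
 * function): the IEM_OPCODE_GET_NEXT_* macros either deliver the value or
 * leave the calling function (return or longjmp depending on the build), so
 * decoder code can read operands linearly without explicit status checks:
 *
 *     IEM_STATIC VBOXSTRICTRC iemOp_ExampleImm8(PVMCPU pVCpu)
 *     {
 *         uint8_t u8Imm;
 *         IEM_OPCODE_GET_NEXT_U8(&u8Imm);  // returns/longjmps on fetch failure
 *         // ... decode/execute using u8Imm ...
 *         return VINF_SUCCESS;
 *     }
 */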
2250
2251
2252#ifndef IEM_WITH_SETJMP
2253/**
2254 * Fetches the next signed byte from the opcode stream.
2255 *
2256 * @returns Strict VBox status code.
2257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2258 * @param pi8 Where to return the signed byte.
2259 */
2260DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPU pVCpu, int8_t *pi8)
2261{
2262 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2263}
2264#endif /* !IEM_WITH_SETJMP */
2265
2266
2267/**
2268 * Fetches the next signed byte from the opcode stream, returning automatically
2269 * on failure.
2270 *
2271 * @param a_pi8 Where to return the signed byte.
2272 * @remark Implicitly references pVCpu.
2273 */
2274#ifndef IEM_WITH_SETJMP
2275# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2276 do \
2277 { \
2278 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2279 if (rcStrict2 != VINF_SUCCESS) \
2280 return rcStrict2; \
2281 } while (0)
2282#else /* IEM_WITH_SETJMP */
2283# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2284
2285#endif /* IEM_WITH_SETJMP */
2286
2287#ifndef IEM_WITH_SETJMP
2288
2289/**
2290 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2291 *
2292 * @returns Strict VBox status code.
2293 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2294 * @param pu16 Where to return the opcode word.
2295 */
2296DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2297{
2298 uint8_t u8;
2299 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2300 if (rcStrict == VINF_SUCCESS)
2301 *pu16 = (int8_t)u8;
2302 return rcStrict;
2303}
2304
2305
2306/**
2307 * Fetches the next signed byte from the opcode stream, extending it to
2308 * unsigned 16-bit.
2309 *
2310 * @returns Strict VBox status code.
2311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2312 * @param pu16 Where to return the unsigned word.
2313 */
2314DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPU pVCpu, uint16_t *pu16)
2315{
2316 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2317 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2318 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2319
2320 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2321 pVCpu->iem.s.offOpcode = offOpcode + 1;
2322 return VINF_SUCCESS;
2323}
2324
2325#endif /* !IEM_WITH_SETJMP */
2326
2327/**
2328 * Fetches the next signed byte from the opcode stream, sign-extending it to
2329 * a word, returning automatically on failure.
2330 *
2331 * @param a_pu16 Where to return the word.
2332 * @remark Implicitly references pVCpu.
2333 */
2334#ifndef IEM_WITH_SETJMP
2335# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2336 do \
2337 { \
2338 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2339 if (rcStrict2 != VINF_SUCCESS) \
2340 return rcStrict2; \
2341 } while (0)
2342#else
2343# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2344#endif
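/*
 * Worked example of the sign extension above (illustrative values only): an
 * opcode byte of 0xfe is read as (int8_t)-2 and stored as 0xfffe in *a_pu16,
 * while 0x7f stays 0x007f.
 */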
2345
2346#ifndef IEM_WITH_SETJMP
2347
2348/**
2349 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2350 *
2351 * @returns Strict VBox status code.
2352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2353 * @param pu32 Where to return the opcode dword.
2354 */
2355DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2356{
2357 uint8_t u8;
2358 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2359 if (rcStrict == VINF_SUCCESS)
2360 *pu32 = (int8_t)u8;
2361 return rcStrict;
2362}
2363
2364
2365/**
2366 * Fetches the next signed byte from the opcode stream, extending it to
2367 * unsigned 32-bit.
2368 *
2369 * @returns Strict VBox status code.
2370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2371 * @param pu32 Where to return the unsigned dword.
2372 */
2373DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPU pVCpu, uint32_t *pu32)
2374{
2375 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2376 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2377 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2378
2379 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2380 pVCpu->iem.s.offOpcode = offOpcode + 1;
2381 return VINF_SUCCESS;
2382}
2383
2384#endif /* !IEM_WITH_SETJMP */
2385
2386/**
2387 * Fetches the next signed byte from the opcode stream, sign-extending it to
2388 * a double word, returning automatically on failure.
2389 *
2390 * @param a_pu32 Where to return the double word.
2391 * @remark Implicitly references pVCpu.
2392 */
2393#ifndef IEM_WITH_SETJMP
2394# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2395 do \
2396 { \
2397 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2398 if (rcStrict2 != VINF_SUCCESS) \
2399 return rcStrict2; \
2400 } while (0)
2401#else
2402# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2403#endif
2404
2405#ifndef IEM_WITH_SETJMP
2406
2407/**
2408 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2409 *
2410 * @returns Strict VBox status code.
2411 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2412 * @param pu64 Where to return the opcode qword.
2413 */
2414DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2415{
2416 uint8_t u8;
2417 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2418 if (rcStrict == VINF_SUCCESS)
2419 *pu64 = (int8_t)u8;
2420 return rcStrict;
2421}
2422
2423
2424/**
2425 * Fetches the next signed byte from the opcode stream, extending it to
2426 * unsigned 64-bit.
2427 *
2428 * @returns Strict VBox status code.
2429 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2430 * @param pu64 Where to return the unsigned qword.
2431 */
2432DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPU pVCpu, uint64_t *pu64)
2433{
2434 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2435 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2436 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2437
2438 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2439 pVCpu->iem.s.offOpcode = offOpcode + 1;
2440 return VINF_SUCCESS;
2441}
2442
2443#endif /* !IEM_WITH_SETJMP */
2444
2445
2446/**
2447 * Fetches the next signed byte from the opcode stream, sign-extending it to
2448 * a quad word, returning automatically on failure.
2449 *
2450 * @param a_pu64 Where to return the quad word.
2451 * @remark Implicitly references pVCpu.
2452 */
2453#ifndef IEM_WITH_SETJMP
2454# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2455 do \
2456 { \
2457 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2458 if (rcStrict2 != VINF_SUCCESS) \
2459 return rcStrict2; \
2460 } while (0)
2461#else
2462# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2463#endif
2464
2465
2466#ifndef IEM_WITH_SETJMP
2467/**
2468 * Fetches the next opcode byte (presumed to be the ModR/M byte), recording its position.
2469 *
2470 * @returns Strict VBox status code.
2471 * @param pVCpu The cross context virtual CPU structure of the
2472 * calling thread.
2473 * @param pu8 Where to return the opcode byte.
2474 */
2475DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPU pVCpu, uint8_t *pu8)
2476{
2477 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2478 pVCpu->iem.s.offModRm = offOpcode;
2479 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2480 {
2481 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2482 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2483 return VINF_SUCCESS;
2484 }
2485 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2486}
2487#else /* IEM_WITH_SETJMP */
2488/**
2489 * Fetches the next opcode byte (presumed to be the ModR/M byte), recording its position; longjmps on error.
2490 *
2491 * @returns The opcode byte.
2492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2493 */
2494DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPU pVCpu)
2495{
2496# ifdef IEM_WITH_CODE_TLB
2497 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2498 pVCpu->iem.s.offModRm = offBuf;
2499 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2500 if (RT_LIKELY( pbBuf != NULL
2501 && offBuf < pVCpu->iem.s.cbInstrBuf))
2502 {
2503 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2504 return pbBuf[offBuf];
2505 }
2506# else
2507 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2508 pVCpu->iem.s.offModRm = offOpcode;
2509 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2510 {
2511 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2512 return pVCpu->iem.s.abOpcode[offOpcode];
2513 }
2514# endif
2515 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2516}
2517#endif /* IEM_WITH_SETJMP */
2518
2519/**
2520 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2521 * on failure.
2522 *
2523 * Will note down the position of the ModR/M byte for VT-x exits.
2524 *
2525 * @param a_pbRm Where to return the RM opcode byte.
2526 * @remark Implicitly references pVCpu.
2527 */
2528#ifndef IEM_WITH_SETJMP
2529# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2530 do \
2531 { \
2532 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2533 if (rcStrict2 == VINF_SUCCESS) \
2534 { /* likely */ } \
2535 else \
2536 return rcStrict2; \
2537 } while (0)
2538#else
2539# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2540#endif /* IEM_WITH_SETJMP */
2541
2542
2543#ifndef IEM_WITH_SETJMP
2544
2545/**
2546 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2547 *
2548 * @returns Strict VBox status code.
2549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2550 * @param pu16 Where to return the opcode word.
2551 */
2552DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPU pVCpu, uint16_t *pu16)
2553{
2554 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2555 if (rcStrict == VINF_SUCCESS)
2556 {
2557 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2558# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2559 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2560# else
2561 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2562# endif
2563 pVCpu->iem.s.offOpcode = offOpcode + 2;
2564 }
2565 else
2566 *pu16 = 0;
2567 return rcStrict;
2568}
2569
2570
2571/**
2572 * Fetches the next opcode word.
2573 *
2574 * @returns Strict VBox status code.
2575 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2576 * @param pu16 Where to return the opcode word.
2577 */
2578DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPU pVCpu, uint16_t *pu16)
2579{
2580 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2581 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2582 {
2583 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2584# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2585 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2586# else
2587 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2588# endif
2589 return VINF_SUCCESS;
2590 }
2591 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2592}
2593
2594#else /* IEM_WITH_SETJMP */
2595
2596/**
2597 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2598 *
2599 * @returns The opcode word.
2600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2601 */
2602DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPU pVCpu)
2603{
2604# ifdef IEM_WITH_CODE_TLB
2605 uint16_t u16;
2606 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2607 return u16;
2608# else
2609 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2610 if (rcStrict == VINF_SUCCESS)
2611 {
2612 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2613 pVCpu->iem.s.offOpcode += 2;
2614# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2615 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2616# else
2617 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2618# endif
2619 }
2620 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2621# endif
2622}
2623
2624
2625/**
2626 * Fetches the next opcode word, longjmp on error.
2627 *
2628 * @returns The opcode word.
2629 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2630 */
2631DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPU pVCpu)
2632{
2633# ifdef IEM_WITH_CODE_TLB
2634 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2635 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2636 if (RT_LIKELY( pbBuf != NULL
2637 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2638 {
2639 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2640# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2641 return *(uint16_t const *)&pbBuf[offBuf];
2642# else
2643 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2644# endif
2645 }
2646# else
2647 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2648 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2649 {
2650 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2651# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2652 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2653# else
2654 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2655# endif
2656 }
2657# endif
2658 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2659}
2660
2661#endif /* IEM_WITH_SETJMP */
2662
2663
2664/**
2665 * Fetches the next opcode word, returns automatically on failure.
2666 *
2667 * @param a_pu16 Where to return the opcode word.
2668 * @remark Implicitly references pVCpu.
2669 */
2670#ifndef IEM_WITH_SETJMP
2671# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2672 do \
2673 { \
2674 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2675 if (rcStrict2 != VINF_SUCCESS) \
2676 return rcStrict2; \
2677 } while (0)
2678#else
2679# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2680#endif
2681
2682#ifndef IEM_WITH_SETJMP
2683
2684/**
2685 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2686 *
2687 * @returns Strict VBox status code.
2688 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2689 * @param pu32 Where to return the opcode double word.
2690 */
2691DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2692{
2693 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2694 if (rcStrict == VINF_SUCCESS)
2695 {
2696 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2697 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2698 pVCpu->iem.s.offOpcode = offOpcode + 2;
2699 }
2700 else
2701 *pu32 = 0;
2702 return rcStrict;
2703}
2704
2705
2706/**
2707 * Fetches the next opcode word, zero extending it to a double word.
2708 *
2709 * @returns Strict VBox status code.
2710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2711 * @param pu32 Where to return the opcode double word.
2712 */
2713DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPU pVCpu, uint32_t *pu32)
2714{
2715 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2716 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2717 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2718
2719 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2720 pVCpu->iem.s.offOpcode = offOpcode + 2;
2721 return VINF_SUCCESS;
2722}
2723
2724#endif /* !IEM_WITH_SETJMP */
2725
2726
2727/**
2728 * Fetches the next opcode word and zero extends it to a double word, returns
2729 * automatically on failure.
2730 *
2731 * @param a_pu32 Where to return the opcode double word.
2732 * @remark Implicitly references pVCpu.
2733 */
2734#ifndef IEM_WITH_SETJMP
2735# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2736 do \
2737 { \
2738 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2739 if (rcStrict2 != VINF_SUCCESS) \
2740 return rcStrict2; \
2741 } while (0)
2742#else
2743# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2744#endif
2745
2746#ifndef IEM_WITH_SETJMP
2747
2748/**
2749 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2750 *
2751 * @returns Strict VBox status code.
2752 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2753 * @param pu64 Where to return the opcode quad word.
2754 */
2755DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
2756{
2757 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2758 if (rcStrict == VINF_SUCCESS)
2759 {
2760 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2761 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2762 pVCpu->iem.s.offOpcode = offOpcode + 2;
2763 }
2764 else
2765 *pu64 = 0;
2766 return rcStrict;
2767}
2768
2769
2770/**
2771 * Fetches the next opcode word, zero extending it to a quad word.
2772 *
2773 * @returns Strict VBox status code.
2774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2775 * @param pu64 Where to return the opcode quad word.
2776 */
2777DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPU pVCpu, uint64_t *pu64)
2778{
2779 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2780 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2781 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2782
2783 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2784 pVCpu->iem.s.offOpcode = offOpcode + 2;
2785 return VINF_SUCCESS;
2786}
2787
2788#endif /* !IEM_WITH_SETJMP */
2789
2790/**
2791 * Fetches the next opcode word and zero extends it to a quad word, returns
2792 * automatically on failure.
2793 *
2794 * @param a_pu64 Where to return the opcode quad word.
2795 * @remark Implicitly references pVCpu.
2796 */
2797#ifndef IEM_WITH_SETJMP
2798# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2799 do \
2800 { \
2801 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2802 if (rcStrict2 != VINF_SUCCESS) \
2803 return rcStrict2; \
2804 } while (0)
2805#else
2806# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2807#endif
2808
2809
2810#ifndef IEM_WITH_SETJMP
2811/**
2812 * Fetches the next signed word from the opcode stream.
2813 *
2814 * @returns Strict VBox status code.
2815 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2816 * @param pi16 Where to return the signed word.
2817 */
2818DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPU pVCpu, int16_t *pi16)
2819{
2820 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2821}
2822#endif /* !IEM_WITH_SETJMP */
2823
2824
2825/**
2826 * Fetches the next signed word from the opcode stream, returning automatically
2827 * on failure.
2828 *
2829 * @param a_pi16 Where to return the signed word.
2830 * @remark Implicitly references pVCpu.
2831 */
2832#ifndef IEM_WITH_SETJMP
2833# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2834 do \
2835 { \
2836 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2837 if (rcStrict2 != VINF_SUCCESS) \
2838 return rcStrict2; \
2839 } while (0)
2840#else
2841# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2842#endif
2843
2844#ifndef IEM_WITH_SETJMP
2845
2846/**
2847 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2848 *
2849 * @returns Strict VBox status code.
2850 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2851 * @param pu32 Where to return the opcode dword.
2852 */
2853DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPU pVCpu, uint32_t *pu32)
2854{
2855 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2856 if (rcStrict == VINF_SUCCESS)
2857 {
2858 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2859# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2860 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2861# else
2862 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2863 pVCpu->iem.s.abOpcode[offOpcode + 1],
2864 pVCpu->iem.s.abOpcode[offOpcode + 2],
2865 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2866# endif
2867 pVCpu->iem.s.offOpcode = offOpcode + 4;
2868 }
2869 else
2870 *pu32 = 0;
2871 return rcStrict;
2872}
2873
2874
2875/**
2876 * Fetches the next opcode dword.
2877 *
2878 * @returns Strict VBox status code.
2879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2880 * @param pu32 Where to return the opcode double word.
2881 */
2882DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPU pVCpu, uint32_t *pu32)
2883{
2884 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2885 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2886 {
2887 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2888# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2889 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2890# else
2891 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2892 pVCpu->iem.s.abOpcode[offOpcode + 1],
2893 pVCpu->iem.s.abOpcode[offOpcode + 2],
2894 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2895# endif
2896 return VINF_SUCCESS;
2897 }
2898 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2899}
2900
2901#else /* IEM_WITH_SETJMP */
2902
2903/**
2904 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2905 *
2906 * @returns The opcode dword.
2907 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2908 */
2909DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPU pVCpu)
2910{
2911# ifdef IEM_WITH_CODE_TLB
2912 uint32_t u32;
2913 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2914 return u32;
2915# else
2916 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2917 if (rcStrict == VINF_SUCCESS)
2918 {
2919 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2920 pVCpu->iem.s.offOpcode = offOpcode + 4;
2921# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2922 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2923# else
2924 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2925 pVCpu->iem.s.abOpcode[offOpcode + 1],
2926 pVCpu->iem.s.abOpcode[offOpcode + 2],
2927 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2928# endif
2929 }
2930 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2931# endif
2932}
2933
2934
2935/**
2936 * Fetches the next opcode dword, longjmp on error.
2937 *
2938 * @returns The opcode dword.
2939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2940 */
2941DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPU pVCpu)
2942{
2943# ifdef IEM_WITH_CODE_TLB
2944 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2945 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2946 if (RT_LIKELY( pbBuf != NULL
2947 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2948 {
2949 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2950# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2951 return *(uint32_t const *)&pbBuf[offBuf];
2952# else
2953 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2954 pbBuf[offBuf + 1],
2955 pbBuf[offBuf + 2],
2956 pbBuf[offBuf + 3]);
2957# endif
2958 }
2959# else
2960 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2961 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2962 {
2963 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2964# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2965 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2966# else
2967 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2968 pVCpu->iem.s.abOpcode[offOpcode + 1],
2969 pVCpu->iem.s.abOpcode[offOpcode + 2],
2970 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2971# endif
2972 }
2973# endif
2974 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2975}
2976
2977#endif /* IEM_WITH_SETJMP */
2978
2979
2980/**
2981 * Fetches the next opcode dword, returns automatically on failure.
2982 *
2983 * @param a_pu32 Where to return the opcode dword.
2984 * @remark Implicitly references pVCpu.
2985 */
2986#ifndef IEM_WITH_SETJMP
2987# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2988 do \
2989 { \
2990 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2991 if (rcStrict2 != VINF_SUCCESS) \
2992 return rcStrict2; \
2993 } while (0)
2994#else
2995# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2996#endif
2997
2998#ifndef IEM_WITH_SETJMP
2999
3000/**
3001 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
3002 *
3003 * @returns Strict VBox status code.
3004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3005 * @param pu64 Where to return the opcode dword.
3006 */
3007DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3008{
3009 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3010 if (rcStrict == VINF_SUCCESS)
3011 {
3012 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3013 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3014 pVCpu->iem.s.abOpcode[offOpcode + 1],
3015 pVCpu->iem.s.abOpcode[offOpcode + 2],
3016 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3017 pVCpu->iem.s.offOpcode = offOpcode + 4;
3018 }
3019 else
3020 *pu64 = 0;
3021 return rcStrict;
3022}
3023
3024
3025/**
3026 * Fetches the next opcode dword, zero extending it to a quad word.
3027 *
3028 * @returns Strict VBox status code.
3029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3030 * @param pu64 Where to return the opcode quad word.
3031 */
3032DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPU pVCpu, uint64_t *pu64)
3033{
3034 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3035 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3036 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
3037
3038 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3039 pVCpu->iem.s.abOpcode[offOpcode + 1],
3040 pVCpu->iem.s.abOpcode[offOpcode + 2],
3041 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3042 pVCpu->iem.s.offOpcode = offOpcode + 4;
3043 return VINF_SUCCESS;
3044}
3045
3046#endif /* !IEM_WITH_SETJMP */
3047
3048
3049/**
3050 * Fetches the next opcode dword and zero extends it to a quad word, returns
3051 * automatically on failure.
3052 *
3053 * @param a_pu64 Where to return the opcode quad word.
3054 * @remark Implicitly references pVCpu.
3055 */
3056#ifndef IEM_WITH_SETJMP
3057# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
3058 do \
3059 { \
3060 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
3061 if (rcStrict2 != VINF_SUCCESS) \
3062 return rcStrict2; \
3063 } while (0)
3064#else
3065# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
3066#endif
3067
3068
3069#ifndef IEM_WITH_SETJMP
3070/**
3071 * Fetches the next signed double word from the opcode stream.
3072 *
3073 * @returns Strict VBox status code.
3074 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3075 * @param pi32 Where to return the signed double word.
3076 */
3077DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPU pVCpu, int32_t *pi32)
3078{
3079 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
3080}
3081#endif
3082
3083/**
3084 * Fetches the next signed double word from the opcode stream, returning
3085 * automatically on failure.
3086 *
3087 * @param a_pi32 Where to return the signed double word.
3088 * @remark Implicitly references pVCpu.
3089 */
3090#ifndef IEM_WITH_SETJMP
3091# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3092 do \
3093 { \
3094 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3095 if (rcStrict2 != VINF_SUCCESS) \
3096 return rcStrict2; \
3097 } while (0)
3098#else
3099# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3100#endif
3101
3102#ifndef IEM_WITH_SETJMP
3103
3104/**
3105 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3106 *
3107 * @returns Strict VBox status code.
3108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3109 * @param pu64 Where to return the opcode qword.
3110 */
3111DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3112{
3113 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3114 if (rcStrict == VINF_SUCCESS)
3115 {
3116 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3117 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3118 pVCpu->iem.s.abOpcode[offOpcode + 1],
3119 pVCpu->iem.s.abOpcode[offOpcode + 2],
3120 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3121 pVCpu->iem.s.offOpcode = offOpcode + 4;
3122 }
3123 else
3124 *pu64 = 0;
3125 return rcStrict;
3126}
3127
3128
3129/**
3130 * Fetches the next opcode dword, sign extending it into a quad word.
3131 *
3132 * @returns Strict VBox status code.
3133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3134 * @param pu64 Where to return the opcode quad word.
3135 */
3136DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPU pVCpu, uint64_t *pu64)
3137{
3138 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3139 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3140 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3141
3142 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3143 pVCpu->iem.s.abOpcode[offOpcode + 1],
3144 pVCpu->iem.s.abOpcode[offOpcode + 2],
3145 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3146 *pu64 = i32;
3147 pVCpu->iem.s.offOpcode = offOpcode + 4;
3148 return VINF_SUCCESS;
3149}
3150
3151#endif /* !IEM_WITH_SETJMP */
3152
3153
3154/**
3155 * Fetches the next opcode double word and sign extends it to a quad word,
3156 * returns automatically on failure.
3157 *
3158 * @param a_pu64 Where to return the opcode quad word.
3159 * @remark Implicitly references pVCpu.
3160 */
3161#ifndef IEM_WITH_SETJMP
3162# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3163 do \
3164 { \
3165 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3166 if (rcStrict2 != VINF_SUCCESS) \
3167 return rcStrict2; \
3168 } while (0)
3169#else
3170# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3171#endif
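
/*
 * Worked example (illustration only): the difference between the ..._U32_ZX_U64
 * and ..._S32_SX_U64 fetchers for the opcode bytes f0 ff ff ff, i.e. the
 * little-endian dword 0xfffffff0 (-16):
 */
#if 0 /* example only, never compiled */
AssertCompile((uint64_t)UINT32_C(0xfffffff0)          == UINT64_C(0x00000000fffffff0)); /* zero extension */
AssertCompile((uint64_t)(int32_t)UINT32_C(0xfffffff0) == UINT64_C(0xfffffffffffffff0)); /* sign extension */
#endif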
3172
3173#ifndef IEM_WITH_SETJMP
3174
3175/**
3176 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3177 *
3178 * @returns Strict VBox status code.
3179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3180 * @param pu64 Where to return the opcode qword.
3181 */
3182DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPU pVCpu, uint64_t *pu64)
3183{
3184 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3185 if (rcStrict == VINF_SUCCESS)
3186 {
3187 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3188# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3189 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3190# else
3191 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3192 pVCpu->iem.s.abOpcode[offOpcode + 1],
3193 pVCpu->iem.s.abOpcode[offOpcode + 2],
3194 pVCpu->iem.s.abOpcode[offOpcode + 3],
3195 pVCpu->iem.s.abOpcode[offOpcode + 4],
3196 pVCpu->iem.s.abOpcode[offOpcode + 5],
3197 pVCpu->iem.s.abOpcode[offOpcode + 6],
3198 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3199# endif
3200 pVCpu->iem.s.offOpcode = offOpcode + 8;
3201 }
3202 else
3203 *pu64 = 0;
3204 return rcStrict;
3205}
3206
3207
3208/**
3209 * Fetches the next opcode qword.
3210 *
3211 * @returns Strict VBox status code.
3212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3213 * @param pu64 Where to return the opcode qword.
3214 */
3215DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPU pVCpu, uint64_t *pu64)
3216{
3217 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3218 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3219 {
3220# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3221 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3222# else
3223 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3224 pVCpu->iem.s.abOpcode[offOpcode + 1],
3225 pVCpu->iem.s.abOpcode[offOpcode + 2],
3226 pVCpu->iem.s.abOpcode[offOpcode + 3],
3227 pVCpu->iem.s.abOpcode[offOpcode + 4],
3228 pVCpu->iem.s.abOpcode[offOpcode + 5],
3229 pVCpu->iem.s.abOpcode[offOpcode + 6],
3230 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3231# endif
3232 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3233 return VINF_SUCCESS;
3234 }
3235 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3236}
3237
3238#else /* IEM_WITH_SETJMP */
3239
3240/**
3241 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3242 *
3243 * @returns The opcode qword.
3244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3245 */
3246DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPU pVCpu)
3247{
3248# ifdef IEM_WITH_CODE_TLB
3249 uint64_t u64;
3250 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3251 return u64;
3252# else
3253 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3254 if (rcStrict == VINF_SUCCESS)
3255 {
3256 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3257 pVCpu->iem.s.offOpcode = offOpcode + 8;
3258# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3259 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3260# else
3261 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3262 pVCpu->iem.s.abOpcode[offOpcode + 1],
3263 pVCpu->iem.s.abOpcode[offOpcode + 2],
3264 pVCpu->iem.s.abOpcode[offOpcode + 3],
3265 pVCpu->iem.s.abOpcode[offOpcode + 4],
3266 pVCpu->iem.s.abOpcode[offOpcode + 5],
3267 pVCpu->iem.s.abOpcode[offOpcode + 6],
3268 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3269# endif
3270 }
3271 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3272# endif
3273}
3274
3275
3276/**
3277 * Fetches the next opcode qword, longjmp on error.
3278 *
3279 * @returns The opcode qword.
3280 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3281 */
3282DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPU pVCpu)
3283{
3284# ifdef IEM_WITH_CODE_TLB
3285 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3286 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3287 if (RT_LIKELY( pbBuf != NULL
3288 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3289 {
3290 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3291# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3292 return *(uint64_t const *)&pbBuf[offBuf];
3293# else
3294 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3295 pbBuf[offBuf + 1],
3296 pbBuf[offBuf + 2],
3297 pbBuf[offBuf + 3],
3298 pbBuf[offBuf + 4],
3299 pbBuf[offBuf + 5],
3300 pbBuf[offBuf + 6],
3301 pbBuf[offBuf + 7]);
3302# endif
3303 }
3304# else
3305 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3306 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3307 {
3308 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3309# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3310 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3311# else
3312 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3313 pVCpu->iem.s.abOpcode[offOpcode + 1],
3314 pVCpu->iem.s.abOpcode[offOpcode + 2],
3315 pVCpu->iem.s.abOpcode[offOpcode + 3],
3316 pVCpu->iem.s.abOpcode[offOpcode + 4],
3317 pVCpu->iem.s.abOpcode[offOpcode + 5],
3318 pVCpu->iem.s.abOpcode[offOpcode + 6],
3319 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3320# endif
3321 }
3322# endif
3323 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3324}
3325
3326#endif /* IEM_WITH_SETJMP */
3327
3328/**
3329 * Fetches the next opcode quad word, returns automatically on failure.
3330 *
3331 * @param a_pu64 Where to return the opcode quad word.
3332 * @remark Implicitly references pVCpu.
3333 */
3334#ifndef IEM_WITH_SETJMP
3335# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3336 do \
3337 { \
3338 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3339 if (rcStrict2 != VINF_SUCCESS) \
3340 return rcStrict2; \
3341 } while (0)
3342#else
3343# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3344#endif
3345
3346
3347/** @name Misc Worker Functions.
3348 * @{
3349 */
3350
3351/**
3352 * Gets the exception class for the specified exception vector.
3353 *
3354 * @returns The class of the specified exception.
3355 * @param uVector The exception vector.
3356 */
3357IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3358{
3359 Assert(uVector <= X86_XCPT_LAST);
3360 switch (uVector)
3361 {
3362 case X86_XCPT_DE:
3363 case X86_XCPT_TS:
3364 case X86_XCPT_NP:
3365 case X86_XCPT_SS:
3366 case X86_XCPT_GP:
3367 case X86_XCPT_SX: /* AMD only */
3368 return IEMXCPTCLASS_CONTRIBUTORY;
3369
3370 case X86_XCPT_PF:
3371 case X86_XCPT_VE: /* Intel only */
3372 return IEMXCPTCLASS_PAGE_FAULT;
3373
3374 case X86_XCPT_DF:
3375 return IEMXCPTCLASS_DOUBLE_FAULT;
3376 }
3377 return IEMXCPTCLASS_BENIGN;
3378}
3379
3380
3381/**
3382 * Evaluates how to handle an exception caused during delivery of another event
3383 * (exception / interrupt).
3384 *
3385 * @returns How to handle the recursive exception.
3386 * @param pVCpu The cross context virtual CPU structure of the
3387 * calling thread.
3388 * @param fPrevFlags The flags of the previous event.
3389 * @param uPrevVector The vector of the previous event.
3390 * @param fCurFlags The flags of the current exception.
3391 * @param uCurVector The vector of the current exception.
3392 * @param pfXcptRaiseInfo Where to store additional information about the
3393 * exception condition. Optional.
3394 */
3395VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPU pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3396 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3397{
3398 /*
3399 * Only CPU exceptions can be raised while delivering other events; software interrupt
3400 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3401 */
3402 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3403 Assert(pVCpu); RT_NOREF(pVCpu);
3404 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3405
3406 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3407 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3408 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3409 {
3410 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3411 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3412 {
3413 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3414 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3415 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3416 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3417 {
3418 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3419 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3420 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3421 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3422 uCurVector, pVCpu->cpum.GstCtx.cr2));
3423 }
3424 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3425 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3426 {
3427 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3428 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3429 }
3430 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3431 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3432 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3433 {
3434 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3435 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3436 }
3437 }
3438 else
3439 {
3440 if (uPrevVector == X86_XCPT_NMI)
3441 {
3442 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3443 if (uCurVector == X86_XCPT_PF)
3444 {
3445 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3446 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3447 }
3448 }
3449 else if ( uPrevVector == X86_XCPT_AC
3450 && uCurVector == X86_XCPT_AC)
3451 {
3452 enmRaise = IEMXCPTRAISE_CPU_HANG;
3453 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3454 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3455 }
3456 }
3457 }
3458 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3459 {
3460 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3461 if (uCurVector == X86_XCPT_PF)
3462 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3463 }
3464 else
3465 {
3466 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3467 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3468 }
3469
3470 if (pfXcptRaiseInfo)
3471 *pfXcptRaiseInfo = fRaiseInfo;
3472 return enmRaise;
3473}
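
/*
 * Illustrative usage sketch (illustration only): a #GP raised while delivering a
 * #PF is a contributory exception on top of a page fault, so the function above
 * reports a double fault together with IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT.
 */
#if 0 /* example only, never compiled */
IEMXCPTRAISEINFO fRaiseInfo;
IEMXCPTRAISE     enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
                                                     IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
                                                     IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
                                                     &fRaiseInfo);
Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
Assert(fRaiseInfo == IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT);
#endif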
3474
3475
3476/**
3477 * Enters the CPU shutdown state initiated by a triple fault or other
3478 * unrecoverable conditions.
3479 *
3480 * @returns Strict VBox status code.
3481 * @param pVCpu The cross context virtual CPU structure of the
3482 * calling thread.
3483 */
3484IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPU pVCpu)
3485{
3486 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3487 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu);
3488
3489 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3490 {
3491 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3492 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3493 }
3494
3495 RT_NOREF(pVCpu);
3496 return VINF_EM_TRIPLE_FAULT;
3497}
3498
3499
3500/**
3501 * Validates a new SS segment.
3502 *
3503 * @returns VBox strict status code.
3504 * @param pVCpu The cross context virtual CPU structure of the
3505 * calling thread.
3506 * @param NewSS The new SS selector.
3507 * @param uCpl The CPL to load the stack for.
3508 * @param pDesc Where to return the descriptor.
3509 */
3510IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPU pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3511{
3512 /* Null selectors are not allowed (we're not called for dispatching
3513 interrupts with SS=0 in long mode). */
3514 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3515 {
3516 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3517 return iemRaiseTaskSwitchFault0(pVCpu);
3518 }
3519
3520 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3521 if ((NewSS & X86_SEL_RPL) != uCpl)
3522 {
3523 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differ -> #TS\n", NewSS, uCpl));
3524 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3525 }
3526
3527 /*
3528 * Read the descriptor.
3529 */
3530 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3531 if (rcStrict != VINF_SUCCESS)
3532 return rcStrict;
3533
3534 /*
3535 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3536 */
3537 if (!pDesc->Legacy.Gen.u1DescType)
3538 {
3539 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3540 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3541 }
3542
3543 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3544 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3545 {
3546 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3547 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3548 }
3549 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3550 {
3551 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differ -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3552 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3553 }
3554
3555 /* Is it there? */
3556 /** @todo testcase: Is this checked before the canonical / limit check below? */
3557 if (!pDesc->Legacy.Gen.u1Present)
3558 {
3559 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3560 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3561 }
3562
3563 return VINF_SUCCESS;
3564}
3565
3566
3567/**
3568 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3569 * not.
3570 *
3571 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3572 */
3573#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3574# define IEMMISC_GET_EFL(a_pVCpu) ( CPUMRawGetEFlags(a_pVCpu) )
3575#else
3576# define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3577#endif
3578
3579/**
3580 * Updates the EFLAGS in the correct manner wrt. PATM.
3581 *
3582 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3583 * @param a_fEfl The new EFLAGS.
3584 */
3585#ifdef VBOX_WITH_RAW_MODE_NOT_R0
3586# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl)
3587#else
3588# define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3589#endif
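
/*
 * Illustrative usage sketch (illustration only): the macros above abstract where
 * EFLAGS lives (PATM in raw mode vs. the guest context), and the typical use is a
 * read-modify-write sequence, e.g. clearing IF:
 */
#if 0 /* example only, never compiled */
uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
fEfl &= ~X86_EFL_IF;
IEMMISC_SET_EFL(pVCpu, fEfl);
#endif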
3590
3591
3592/** @} */
3593
3594/** @name Raising Exceptions.
3595 *
3596 * @{
3597 */
3598
3599
3600/**
3601 * Loads the specified stack far pointer from the TSS.
3602 *
3603 * @returns VBox strict status code.
3604 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3605 * @param uCpl The CPL to load the stack for.
3606 * @param pSelSS Where to return the new stack segment.
3607 * @param puEsp Where to return the new stack pointer.
3608 */
3609IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPU pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3610{
3611 VBOXSTRICTRC rcStrict;
3612 Assert(uCpl < 4);
3613
3614 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3615 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3616 {
3617 /*
3618 * 16-bit TSS (X86TSS16).
3619 */
3620 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3621 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3622 {
3623 uint32_t off = uCpl * 4 + 2;
3624 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3625 {
3626 /** @todo check actual access pattern here. */
3627 uint32_t u32Tmp = 0; /* gcc maybe... */
3628 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3629 if (rcStrict == VINF_SUCCESS)
3630 {
3631 *puEsp = RT_LOWORD(u32Tmp);
3632 *pSelSS = RT_HIWORD(u32Tmp);
3633 return VINF_SUCCESS;
3634 }
3635 }
3636 else
3637 {
3638 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3639 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3640 }
3641 break;
3642 }
3643
3644 /*
3645 * 32-bit TSS (X86TSS32).
3646 */
3647 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3648 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3649 {
3650 uint32_t off = uCpl * 8 + 4;
3651 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3652 {
3653/** @todo check actual access pattern here. */
3654 uint64_t u64Tmp;
3655 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3656 if (rcStrict == VINF_SUCCESS)
3657 {
3658 *puEsp = u64Tmp & UINT32_MAX;
3659 *pSelSS = (RTSEL)(u64Tmp >> 32);
3660 return VINF_SUCCESS;
3661 }
3662 }
3663 else
3664 {
3665 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3666 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3667 }
3668 break;
3669 }
3670
3671 default:
3672 AssertFailed();
3673 rcStrict = VERR_IEM_IPE_4;
3674 break;
3675 }
3676
3677 *puEsp = 0; /* make gcc happy */
3678 *pSelSS = 0; /* make gcc happy */
3679 return rcStrict;
3680}
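
/*
 * Worked example (illustration only): with a 32-bit TSS and uCpl=1 the function
 * above reads the qword at TSS offset 1*8 + 4 = 12, i.e. the esp1/ss1 pair,
 * returning ESP1 in the low dword and SS1 from bits 32..47.  With a 16-bit TSS
 * and uCpl=1 it reads the dword at offset 1*4 + 2 = 6, i.e. the sp1/ss1 word pair.
 */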
3681
3682
3683/**
3684 * Loads the specified stack pointer from the 64-bit TSS.
3685 *
3686 * @returns VBox strict status code.
3687 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3688 * @param uCpl The CPL to load the stack for.
3689 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3690 * @param puRsp Where to return the new stack pointer.
3691 */
3692IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPU pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3693{
3694 Assert(uCpl < 4);
3695 Assert(uIst < 8);
3696 *puRsp = 0; /* make gcc happy */
3697
3698 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3699 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3700
3701 uint32_t off;
3702 if (uIst)
3703 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3704 else
3705 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3706 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3707 {
3708 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3709 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3710 }
3711
3712 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3713}
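
/*
 * Worked example (illustration only), using the Intel 64-bit TSS layout where
 * rsp0 sits at offset 4 and ist1 at offset 36: uIst=0 with uCpl=2 reads the
 * qword at offset 2*8 + 4 = 20 (RSP2), while uIst=3 reads the qword at offset
 * (3-1)*8 + 36 = 52 (IST3).
 */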
3714
3715
3716/**
3717 * Adjusts the CPU state according to the exception being raised.
3718 *
3719 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3720 * @param u8Vector The exception that has been raised.
3721 */
3722DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPU pVCpu, uint8_t u8Vector)
3723{
3724 switch (u8Vector)
3725 {
3726 case X86_XCPT_DB:
3727 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3728 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3729 break;
3730 /** @todo Read the AMD and Intel exception reference... */
3731 }
3732}
3733
3734
3735/**
3736 * Implements exceptions and interrupts for real mode.
3737 *
3738 * @returns VBox strict status code.
3739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3740 * @param cbInstr The number of bytes to offset rIP by in the return
3741 * address.
3742 * @param u8Vector The interrupt / exception vector number.
3743 * @param fFlags The flags.
3744 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3745 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3746 */
3747IEM_STATIC VBOXSTRICTRC
3748iemRaiseXcptOrIntInRealMode(PVMCPU pVCpu,
3749 uint8_t cbInstr,
3750 uint8_t u8Vector,
3751 uint32_t fFlags,
3752 uint16_t uErr,
3753 uint64_t uCr2)
3754{
3755 NOREF(uErr); NOREF(uCr2);
3756 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3757
3758 /*
3759 * Read the IDT entry.
3760 */
3761 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3762 {
3763 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3764 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3765 }
3766 RTFAR16 Idte;
3767 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3768 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3769 {
3770 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3771 return rcStrict;
3772 }
3773
3774 /*
3775 * Push the stack frame.
3776 */
3777 uint16_t *pu16Frame;
3778 uint64_t uNewRsp;
3779 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3780 if (rcStrict != VINF_SUCCESS)
3781 return rcStrict;
3782
3783 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3784#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3785 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3786 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3787 fEfl |= UINT16_C(0xf000);
3788#endif
3789 pu16Frame[2] = (uint16_t)fEfl;
3790 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3791 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3792 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3793 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3794 return rcStrict;
3795
3796 /*
3797 * Load the vector address into cs:ip and make exception specific state
3798 * adjustments.
3799 */
3800 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3801 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3802 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3803 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3804 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3805 pVCpu->cpum.GstCtx.rip = Idte.off;
3806 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3807 IEMMISC_SET_EFL(pVCpu, fEfl);
3808
3809 /** @todo do we actually do this in real mode? */
3810 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3811 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3812
3813 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3814}
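
/*
 * Worked example (illustration only): for vector 0x21 the function above reads
 * the 4-byte IVT entry at idtr.pIdt + 4*0x21 = +0x84 (16-bit offset followed by
 * the segment), pushes FLAGS, CS and the return IP (6 bytes; IP adjusted by
 * cbInstr for software interrupts), and then continues at segment:offset from
 * the entry.
 */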
3815
3816
3817/**
3818 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3819 *
3820 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3821 * @param pSReg Pointer to the segment register.
3822 */
3823IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPU pVCpu, PCPUMSELREG pSReg)
3824{
3825 pSReg->Sel = 0;
3826 pSReg->ValidSel = 0;
3827 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3828 {
3829 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes: */
3830 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3831 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3832 }
3833 else
3834 {
3835 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3836 /** @todo check this on AMD-V */
3837 pSReg->u64Base = 0;
3838 pSReg->u32Limit = 0;
3839 }
3840}
3841
3842
3843/**
3844 * Loads a segment selector during a task switch in V8086 mode.
3845 *
3846 * @param pSReg Pointer to the segment register.
3847 * @param uSel The selector value to load.
3848 */
3849IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3850{
3851 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3852 pSReg->Sel = uSel;
3853 pSReg->ValidSel = uSel;
3854 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3855 pSReg->u64Base = uSel << 4;
3856 pSReg->u32Limit = 0xffff;
3857 pSReg->Attr.u = 0xf3;
3858}
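
/*
 * Worked example (illustration only): loading uSel=0x1234 in V8086 mode yields
 * base 0x12340 (selector << 4), limit 0xffff and attributes 0xf3, i.e. a
 * present, DPL=3, accessed read/write data segment.
 */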
3859
3860
3861/**
3862 * Loads a NULL data selector into a selector register, both the hidden and
3863 * visible parts, in protected mode.
3864 *
3865 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3866 * @param pSReg Pointer to the segment register.
3867 * @param uRpl The RPL.
3868 */
3869IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPU pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3870{
3871 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3872 * data selector in protected mode. */
3873 pSReg->Sel = uRpl;
3874 pSReg->ValidSel = uRpl;
3875 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3876 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3877 {
3878 /* VT-x (Intel 3960x) observed doing something like this. */
3879 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3880 pSReg->u32Limit = UINT32_MAX;
3881 pSReg->u64Base = 0;
3882 }
3883 else
3884 {
3885 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3886 pSReg->u32Limit = 0;
3887 pSReg->u64Base = 0;
3888 }
3889}
3890
3891
3892/**
3893 * Loads a segment selector during a task switch in protected mode.
3894 *
3895 * In this task switch scenario, we would throw \#TS exceptions rather than
3896 * \#GPs.
3897 *
3898 * @returns VBox strict status code.
3899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3900 * @param pSReg Pointer to the segment register.
3901 * @param uSel The new selector value.
3902 *
3903 * @remarks This does _not_ handle CS or SS.
3904 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3905 */
3906IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPU pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3907{
3908 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3909
3910 /* Null data selector. */
3911 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3912 {
3913 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3914 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3915 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3916 return VINF_SUCCESS;
3917 }
3918
3919 /* Fetch the descriptor. */
3920 IEMSELDESC Desc;
3921 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3922 if (rcStrict != VINF_SUCCESS)
3923 {
3924 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3925 VBOXSTRICTRC_VAL(rcStrict)));
3926 return rcStrict;
3927 }
3928
3929 /* Must be a data segment or readable code segment. */
3930 if ( !Desc.Legacy.Gen.u1DescType
3931 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3932 {
3933 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3934 Desc.Legacy.Gen.u4Type));
3935 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3936 }
3937
3938 /* Check privileges for data segments and non-conforming code segments. */
3939 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3940 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3941 {
3942 /* The RPL and the new CPL must be less than or equal to the DPL. */
3943 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3944 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3945 {
3946 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3947 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3948 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3949 }
3950 }
3951
3952 /* Is it there? */
3953 if (!Desc.Legacy.Gen.u1Present)
3954 {
3955 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3956 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3957 }
3958
3959 /* The base and limit. */
3960 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3961 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3962
3963 /*
3964 * Ok, everything checked out fine. Now set the accessed bit before
3965 * committing the result into the registers.
3966 */
3967 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3968 {
3969 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3970 if (rcStrict != VINF_SUCCESS)
3971 return rcStrict;
3972 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3973 }
3974
3975 /* Commit */
3976 pSReg->Sel = uSel;
3977 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3978 pSReg->u32Limit = cbLimit;
3979 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3980 pSReg->ValidSel = uSel;
3981 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3982 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3983 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3984
3985 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3986 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3987 return VINF_SUCCESS;
3988}
3989
3990
3991/**
3992 * Performs a task switch.
3993 *
3994 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3995 * caller is responsible for performing the necessary checks (like DPL, TSS
3996 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3997 * reference for JMP, CALL, IRET.
3998 *
3999 * If the task switch is due to a software interrupt or hardware exception,
4000 * the caller is responsible for validating the TSS selector and descriptor. See
4001 * Intel Instruction reference for INT n.
4002 *
4003 * @returns VBox strict status code.
4004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4005 * @param enmTaskSwitch The cause of the task switch.
4006 * @param uNextEip The EIP effective after the task switch.
4007 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
4008 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4009 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4010 * @param SelTSS The TSS selector of the new task.
4011 * @param pNewDescTSS Pointer to the new TSS descriptor.
4012 */
4013IEM_STATIC VBOXSTRICTRC
4014iemTaskSwitch(PVMCPU pVCpu,
4015 IEMTASKSWITCH enmTaskSwitch,
4016 uint32_t uNextEip,
4017 uint32_t fFlags,
4018 uint16_t uErr,
4019 uint64_t uCr2,
4020 RTSEL SelTSS,
4021 PIEMSELDESC pNewDescTSS)
4022{
4023 Assert(!IEM_IS_REAL_MODE(pVCpu));
4024 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
4025 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4026
4027 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
4028 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4029 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4030 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4031 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4032
4033 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
4034 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4035
4036 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
4037 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
4038
4039 /* Update CR2 in case it's a page-fault. */
4040 /** @todo This should probably be done much earlier in IEM/PGM. See
4041 * @bugref{5653#c49}. */
4042 if (fFlags & IEM_XCPT_FLAGS_CR2)
4043 pVCpu->cpum.GstCtx.cr2 = uCr2;
4044
4045 /*
4046 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
4047 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
4048 */
4049 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
4050 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
4051 if (uNewTSSLimit < uNewTSSLimitMin)
4052 {
4053 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
4054 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
4055 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4056 }
4057
4058 /*
4059 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
4060 * The new TSS must have been read and validated (DPL, limits etc.) before a
4061 * task-switch VM-exit commences.
4062 *
4063 * See Intel spec. 25.4.2 "Treatment of Task Switches".
4064 */
4065 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
4066 {
4067 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
4068 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
4069 }
4070
4071 /*
4072 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
4073 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
4074 */
4075 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
4076 {
4077 uint32_t const uExitInfo1 = SelTSS;
4078 uint32_t uExitInfo2 = uErr;
4079 switch (enmTaskSwitch)
4080 {
4081 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
4082 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
4083 default: break;
4084 }
4085 if (fFlags & IEM_XCPT_FLAGS_ERR)
4086 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
4087 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
4088 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
4089
4090 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
4091 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
4092 RT_NOREF2(uExitInfo1, uExitInfo2);
4093 }
4094
4095 /*
4096 * Check the current TSS limit. The last written byte to the current TSS during the
4097 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4098 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4099 *
4100 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4101 * end up with smaller than "legal" TSS limits.
4102 */
4103 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4104 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4105 if (uCurTSSLimit < uCurTSSLimitMin)
4106 {
4107 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4108 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4109 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4110 }
4111
4112 /*
4113 * Verify that the new TSS can be accessed and map it. Map only the required contents
4114 * and not the entire TSS.
4115 */
4116 void *pvNewTSS;
4117 uint32_t cbNewTSS = uNewTSSLimitMin + 1;
4118 RTGCPTR GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4119 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4120 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4121 * not perform correct translation if this happens. See Intel spec. 7.2.1
4122 * "Task-State Segment" */
4123 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4124 if (rcStrict != VINF_SUCCESS)
4125 {
4126 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4127 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4128 return rcStrict;
4129 }
4130
4131 /*
4132 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4133 */
4134 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4135 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4136 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4137 {
4138 PX86DESC pDescCurTSS;
4139 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4140 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4141 if (rcStrict != VINF_SUCCESS)
4142 {
4143 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4144 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4145 return rcStrict;
4146 }
4147
4148 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4149 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4150 if (rcStrict != VINF_SUCCESS)
4151 {
4152 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4153 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4154 return rcStrict;
4155 }
4156
4157 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4158 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4159 {
4160 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4161 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4162 u32EFlags &= ~X86_EFL_NT;
4163 }
4164 }
4165
4166 /*
4167 * Save the CPU state into the current TSS.
4168 */
4169 RTGCPTR GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4170 if (GCPtrNewTSS == GCPtrCurTSS)
4171 {
4172 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4173 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4174 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4175 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4176 pVCpu->cpum.GstCtx.ldtr.Sel));
4177 }
4178 if (fIsNewTSS386)
4179 {
4180 /*
4181 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4182 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4183 */
4184 void *pvCurTSS32;
4185 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4186 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4187 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4188 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4189 if (rcStrict != VINF_SUCCESS)
4190 {
4191 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4192 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4193 return rcStrict;
4194 }
4195
4196 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4197 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4198 pCurTSS32->eip = uNextEip;
4199 pCurTSS32->eflags = u32EFlags;
4200 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4201 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4202 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4203 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4204 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4205 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4206 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4207 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4208 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4209 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4210 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4211 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4212 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4213 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4214
4215 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4216 if (rcStrict != VINF_SUCCESS)
4217 {
4218 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4219 VBOXSTRICTRC_VAL(rcStrict)));
4220 return rcStrict;
4221 }
4222 }
4223 else
4224 {
4225 /*
4226 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4227 */
4228 void *pvCurTSS16;
4229 uint32_t offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4230 uint32_t cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4231 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4232 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4233 if (rcStrict != VINF_SUCCESS)
4234 {
4235 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4236 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4237 return rcStrict;
4238 }
4239
4240 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4241 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4242 pCurTSS16->ip = uNextEip;
4243 pCurTSS16->flags = u32EFlags;
4244 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4245 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4246 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4247 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4248 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4249 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4250 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4251 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4252 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4253 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4254 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4255 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4256
4257 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4258 if (rcStrict != VINF_SUCCESS)
4259 {
4260 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4261 VBOXSTRICTRC_VAL(rcStrict)));
4262 return rcStrict;
4263 }
4264 }
4265
4266 /*
4267 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4268 */
4269 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4270 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4271 {
4272 /* Whether it's a 16-bit or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4273 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4274 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4275 }
4276
4277 /*
4278 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4279 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4280 */
4281 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4282 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4283 bool fNewDebugTrap;
4284 if (fIsNewTSS386)
4285 {
4286 PX86TSS32 pNewTSS32 = (PX86TSS32)pvNewTSS;
4287 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4288 uNewEip = pNewTSS32->eip;
4289 uNewEflags = pNewTSS32->eflags;
4290 uNewEax = pNewTSS32->eax;
4291 uNewEcx = pNewTSS32->ecx;
4292 uNewEdx = pNewTSS32->edx;
4293 uNewEbx = pNewTSS32->ebx;
4294 uNewEsp = pNewTSS32->esp;
4295 uNewEbp = pNewTSS32->ebp;
4296 uNewEsi = pNewTSS32->esi;
4297 uNewEdi = pNewTSS32->edi;
4298 uNewES = pNewTSS32->es;
4299 uNewCS = pNewTSS32->cs;
4300 uNewSS = pNewTSS32->ss;
4301 uNewDS = pNewTSS32->ds;
4302 uNewFS = pNewTSS32->fs;
4303 uNewGS = pNewTSS32->gs;
4304 uNewLdt = pNewTSS32->selLdt;
4305 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4306 }
4307 else
4308 {
4309 PX86TSS16 pNewTSS16 = (PX86TSS16)pvNewTSS;
4310 uNewCr3 = 0;
4311 uNewEip = pNewTSS16->ip;
4312 uNewEflags = pNewTSS16->flags;
4313 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4314 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4315 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4316 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4317 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4318 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4319 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4320 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4321 uNewES = pNewTSS16->es;
4322 uNewCS = pNewTSS16->cs;
4323 uNewSS = pNewTSS16->ss;
4324 uNewDS = pNewTSS16->ds;
4325 uNewFS = 0;
4326 uNewGS = 0;
4327 uNewLdt = pNewTSS16->selLdt;
4328 fNewDebugTrap = false;
4329 }
4330
4331 if (GCPtrNewTSS == GCPtrCurTSS)
4332 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4333 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4334
4335 /*
4336 * We're done accessing the new TSS.
4337 */
4338 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4339 if (rcStrict != VINF_SUCCESS)
4340 {
4341 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4342 return rcStrict;
4343 }
4344
4345 /*
4346 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4347 */
4348 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4349 {
4350 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4351 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4352 if (rcStrict != VINF_SUCCESS)
4353 {
4354 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4355 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4356 return rcStrict;
4357 }
4358
4359 /* Check that the descriptor indicates the new TSS is available (not busy). */
4360 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4361 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4362 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4363
4364 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4365 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4366 if (rcStrict != VINF_SUCCESS)
4367 {
4368 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4369 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4370 return rcStrict;
4371 }
4372 }
4373
4374 /*
4375 * From this point on, we're technically in the new task. Exceptions raised from here on are
4376 * deferred until the task switch completes, but are delivered before executing any instruction in the new task.
4377 */
4378 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4379 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4380 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4381 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4382 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4383 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4384 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4385
4386 /* Set the busy bit in TR. */
4387 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4388 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4389 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4390 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4391 {
4392 uNewEflags |= X86_EFL_NT;
4393 }
4394
4395 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4396 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4397 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4398
4399 pVCpu->cpum.GstCtx.eip = uNewEip;
4400 pVCpu->cpum.GstCtx.eax = uNewEax;
4401 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4402 pVCpu->cpum.GstCtx.edx = uNewEdx;
4403 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4404 pVCpu->cpum.GstCtx.esp = uNewEsp;
4405 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4406 pVCpu->cpum.GstCtx.esi = uNewEsi;
4407 pVCpu->cpum.GstCtx.edi = uNewEdi;
4408
4409 uNewEflags &= X86_EFL_LIVE_MASK;
4410 uNewEflags |= X86_EFL_RA1_MASK;
4411 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4412
4413 /*
4414 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4415 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4416 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4417 */
4418 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4419 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4420
4421 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4422 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4423
4424 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4425 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4426
4427 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4428 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4429
4430 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4431 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4432
4433 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4434 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4435 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4436
4437 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4438 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4439 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4440 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4441
4442 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4443 {
4444 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4445 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4446 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4447 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4448 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4449 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4450 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4451 }
4452
4453 /*
4454 * Switch CR3 for the new task.
4455 */
4456 if ( fIsNewTSS386
4457 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4458 {
4459 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4460 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4461 AssertRCSuccessReturn(rc, rc);
4462
4463 /* Inform PGM. */
4464 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4465 AssertRCReturn(rc, rc);
4466 /* ignore informational status codes */
4467
4468 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4469 }
4470
4471 /*
4472 * Switch LDTR for the new task.
4473 */
4474 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4475 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4476 else
4477 {
4478 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4479
4480 IEMSELDESC DescNewLdt;
4481 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4482 if (rcStrict != VINF_SUCCESS)
4483 {
4484 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4485 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4486 return rcStrict;
4487 }
4488 if ( !DescNewLdt.Legacy.Gen.u1Present
4489 || DescNewLdt.Legacy.Gen.u1DescType
4490 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4491 {
4492 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4493 uNewLdt, DescNewLdt.Legacy.u));
4494 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4495 }
4496
4497 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4498 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4499 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4500 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4501 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4502 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4503 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4504 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4505 }
4506
4507 IEMSELDESC DescSS;
4508 if (IEM_IS_V86_MODE(pVCpu))
4509 {
4510 pVCpu->iem.s.uCpl = 3;
4511 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4512 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4513 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4514 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4515 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4516 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4517
4518 /* quick fix: fake DescSS. */ /** @todo fix the code further down? */
4519 DescSS.Legacy.u = 0;
4520 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4521 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4522 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4523 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4524 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4525 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4526 DescSS.Legacy.Gen.u2Dpl = 3;
4527 }
4528 else
4529 {
4530 uint8_t uNewCpl = (uNewCS & X86_SEL_RPL);
4531
4532 /*
4533 * Load the stack segment for the new task.
4534 */
4535 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4536 {
4537 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4538 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4539 }
4540
4541 /* Fetch the descriptor. */
4542 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4543 if (rcStrict != VINF_SUCCESS)
4544 {
4545 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4546 VBOXSTRICTRC_VAL(rcStrict)));
4547 return rcStrict;
4548 }
4549
4550 /* SS must be a data segment and writable. */
4551 if ( !DescSS.Legacy.Gen.u1DescType
4552 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4553 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4554 {
4555 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4556 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4557 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4558 }
4559
4560 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4561 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4562 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4563 {
4564 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4565 uNewCpl));
4566 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4567 }
4568
4569 /* Is it there? */
4570 if (!DescSS.Legacy.Gen.u1Present)
4571 {
4572 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4573 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4574 }
4575
4576 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4577 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4578
4579 /* Set the accessed bit before committing the result into SS. */
4580 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4581 {
4582 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4583 if (rcStrict != VINF_SUCCESS)
4584 return rcStrict;
4585 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4586 }
4587
4588 /* Commit SS. */
4589 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4590 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4591 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4592 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4593 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4594 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4595 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4596
4597 /* CPL has changed, update IEM before loading rest of segments. */
4598 pVCpu->iem.s.uCpl = uNewCpl;
4599
4600 /*
4601 * Load the data segments for the new task.
4602 */
4603 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4604 if (rcStrict != VINF_SUCCESS)
4605 return rcStrict;
4606 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4607 if (rcStrict != VINF_SUCCESS)
4608 return rcStrict;
4609 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4610 if (rcStrict != VINF_SUCCESS)
4611 return rcStrict;
4612 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4613 if (rcStrict != VINF_SUCCESS)
4614 return rcStrict;
4615
4616 /*
4617 * Load the code segment for the new task.
4618 */
4619 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4620 {
4621 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4622 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4623 }
4624
4625 /* Fetch the descriptor. */
4626 IEMSELDESC DescCS;
4627 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4628 if (rcStrict != VINF_SUCCESS)
4629 {
4630 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4631 return rcStrict;
4632 }
4633
4634 /* CS must be a code segment. */
4635 if ( !DescCS.Legacy.Gen.u1DescType
4636 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4637 {
4638 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4639 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4640 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4641 }
4642
4643 /* For conforming CS, DPL must be less than or equal to the RPL. */
4644 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4645 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4646 {
 4647             Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4648 DescCS.Legacy.Gen.u2Dpl));
4649 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4650 }
4651
4652 /* For non-conforming CS, DPL must match RPL. */
4653 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4654 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4655 {
 4656             Log(("iemTaskSwitch: non-conforming CS DPL/RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4657 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4658 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4659 }
4660
4661 /* Is it there? */
4662 if (!DescCS.Legacy.Gen.u1Present)
4663 {
4664 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4665 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4666 }
4667
4668 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4669 u64Base = X86DESC_BASE(&DescCS.Legacy);
4670
4671 /* Set the accessed bit before committing the result into CS. */
4672 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4673 {
4674 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4675 if (rcStrict != VINF_SUCCESS)
4676 return rcStrict;
4677 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4678 }
4679
4680 /* Commit CS. */
4681 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4682 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4683 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4684 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4685 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4686 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4687 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4688 }
4689
4690 /** @todo Debug trap. */
4691 if (fIsNewTSS386 && fNewDebugTrap)
4692 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4693
4694 /*
4695 * Construct the error code masks based on what caused this task switch.
4696 * See Intel Instruction reference for INT.
4697 */
4698 uint16_t uExt;
4699 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4700 && !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT))
4701 {
4702 uExt = 1;
4703 }
4704 else
4705 uExt = 0;
4706
4707 /*
4708 * Push any error code on to the new stack.
4709 */
4710 if (fFlags & IEM_XCPT_FLAGS_ERR)
4711 {
4712 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4713 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4714 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
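        /* (The error code is pushed as a dword on a 32-bit TSS and as a word on a 16-bit TSS,
            matching the iemMemStackPushU32/U16 calls below.) */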
4715
4716 /* Check that there is sufficient space on the stack. */
4717 /** @todo Factor out segment limit checking for normal/expand down segments
4718 * into a separate function. */
4719 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4720 {
4721 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4722 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4723 {
4724 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4725 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4726 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4727 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4728 }
4729 }
4730 else
4731 {
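            /* Expand-down stack segment: valid offsets lie strictly above the limit and go up to
               0xffff or 0xffffffff depending on the D/B bit, hence the inverted bounds check. */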
4732 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4733 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4734 {
4735 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4736 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4737 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4738 }
4739 }
4740
4741
4742 if (fIsNewTSS386)
4743 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4744 else
4745 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4746 if (rcStrict != VINF_SUCCESS)
4747 {
4748 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4749 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4750 return rcStrict;
4751 }
4752 }
4753
4754 /* Check the new EIP against the new CS limit. */
4755 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4756 {
 4757         Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4758 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4759 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4760 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4761 }
4762
4763 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4764 pVCpu->cpum.GstCtx.ss.Sel));
4765 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4766}
4767
4768
4769/**
4770 * Implements exceptions and interrupts for protected mode.
4771 *
4772 * @returns VBox strict status code.
4773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4774 * @param cbInstr The number of bytes to offset rIP by in the return
4775 * address.
4776 * @param u8Vector The interrupt / exception vector number.
4777 * @param fFlags The flags.
4778 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4779 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4780 */
4781IEM_STATIC VBOXSTRICTRC
4782iemRaiseXcptOrIntInProtMode(PVMCPU pVCpu,
4783 uint8_t cbInstr,
4784 uint8_t u8Vector,
4785 uint32_t fFlags,
4786 uint16_t uErr,
4787 uint64_t uCr2)
4788{
4789 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4790
4791 /*
4792 * Read the IDT entry.
4793 */
4794 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4795 {
4796 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4797 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4798 }
4799 X86DESC Idte;
4800 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4801 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4802 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4803 {
4804 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4805 return rcStrict;
4806 }
4807 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4808 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4809 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4810
4811 /*
4812 * Check the descriptor type, DPL and such.
4813 * ASSUMES this is done in the same order as described for call-gate calls.
4814 */
4815 if (Idte.Gate.u1DescType)
4816 {
4817 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4818 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4819 }
4820 bool fTaskGate = false;
4821 uint8_t f32BitGate = true;
4822 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4823 switch (Idte.Gate.u4Type)
4824 {
4825 case X86_SEL_TYPE_SYS_UNDEFINED:
4826 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4827 case X86_SEL_TYPE_SYS_LDT:
4828 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4829 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4830 case X86_SEL_TYPE_SYS_UNDEFINED2:
4831 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4832 case X86_SEL_TYPE_SYS_UNDEFINED3:
4833 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4834 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4835 case X86_SEL_TYPE_SYS_UNDEFINED4:
4836 {
4837 /** @todo check what actually happens when the type is wrong...
4838 * esp. call gates. */
4839 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4840 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4841 }
4842
4843 case X86_SEL_TYPE_SYS_286_INT_GATE:
4844 f32BitGate = false;
4845 RT_FALL_THRU();
4846 case X86_SEL_TYPE_SYS_386_INT_GATE:
4847 fEflToClear |= X86_EFL_IF;
4848 break;
4849
4850 case X86_SEL_TYPE_SYS_TASK_GATE:
4851 fTaskGate = true;
4852#ifndef IEM_IMPLEMENTS_TASKSWITCH
4853 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4854#endif
4855 break;
4856
4857 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
 4858             f32BitGate = false;
                  RT_FALL_THRU();
4859 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4860 break;
4861
4862 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4863 }
4864
4865 /* Check DPL against CPL if applicable. */
4866 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4867 {
4868 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4869 {
4870 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4871 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4872 }
4873 }
4874
4875 /* Is it there? */
4876 if (!Idte.Gate.u1Present)
4877 {
4878 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4879 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4880 }
4881
4882 /* Is it a task-gate? */
4883 if (fTaskGate)
4884 {
4885 /*
4886 * Construct the error code masks based on what caused this task switch.
4887 * See Intel Instruction reference for INT.
4888 */
4889 uint16_t const uExt = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? 0 : 1;
4890 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4891 RTSEL SelTSS = Idte.Gate.u16Sel;
4892
4893 /*
4894 * Fetch the TSS descriptor in the GDT.
4895 */
4896 IEMSELDESC DescTSS;
4897 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4898 if (rcStrict != VINF_SUCCESS)
4899 {
4900 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4901 VBOXSTRICTRC_VAL(rcStrict)));
4902 return rcStrict;
4903 }
4904
4905 /* The TSS descriptor must be a system segment and be available (not busy). */
4906 if ( DescTSS.Legacy.Gen.u1DescType
4907 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4908 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4909 {
4910 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4911 u8Vector, SelTSS, DescTSS.Legacy.au64));
4912 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4913 }
4914
4915 /* The TSS must be present. */
4916 if (!DescTSS.Legacy.Gen.u1Present)
4917 {
4918 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4919 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4920 }
4921
4922 /* Do the actual task switch. */
4923 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4924 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4925 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4926 }
4927
4928 /* A null CS is bad. */
4929 RTSEL NewCS = Idte.Gate.u16Sel;
4930 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4931 {
4932 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4933 return iemRaiseGeneralProtectionFault0(pVCpu);
4934 }
4935
4936 /* Fetch the descriptor for the new CS. */
4937 IEMSELDESC DescCS;
4938 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4939 if (rcStrict != VINF_SUCCESS)
4940 {
4941 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4942 return rcStrict;
4943 }
4944
4945 /* Must be a code segment. */
4946 if (!DescCS.Legacy.Gen.u1DescType)
4947 {
4948 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4949 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4950 }
4951 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4952 {
4953 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4954 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4955 }
4956
4957 /* Don't allow lowering the privilege level. */
4958 /** @todo Does the lowering of privileges apply to software interrupts
4959 * only? This has bearings on the more-privileged or
4960 * same-privilege stack behavior further down. A testcase would
4961 * be nice. */
4962 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4963 {
4964 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4965 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4966 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4967 }
4968
4969 /* Make sure the selector is present. */
4970 if (!DescCS.Legacy.Gen.u1Present)
4971 {
4972 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4973 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4974 }
4975
4976 /* Check the new EIP against the new CS limit. */
4977 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4978 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4979 ? Idte.Gate.u16OffsetLow
4980 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4981 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4982 if (uNewEip > cbLimitCS)
4983 {
4984 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4985 u8Vector, uNewEip, cbLimitCS, NewCS));
4986 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4987 }
4988 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4989
4990 /* Calc the flag image to push. */
4991 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4992 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4993 fEfl &= ~X86_EFL_RF;
4994 else
4995 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4996
4997 /* From V8086 mode only go to CPL 0. */
4998 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4999 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5000 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
5001 {
5002 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
5003 return iemRaiseGeneralProtectionFault(pVCpu, 0);
5004 }
5005
5006 /*
5007 * If the privilege level changes, we need to get a new stack from the TSS.
5008 * This in turns means validating the new SS and ESP...
5009 */
5010 if (uNewCpl != pVCpu->iem.s.uCpl)
5011 {
5012 RTSEL NewSS;
5013 uint32_t uNewEsp;
5014 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
5015 if (rcStrict != VINF_SUCCESS)
5016 return rcStrict;
5017
5018 IEMSELDESC DescSS;
5019 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
5020 if (rcStrict != VINF_SUCCESS)
5021 return rcStrict;
5022 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
5023 if (!DescSS.Legacy.Gen.u1DefBig)
5024 {
5025 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
5026 uNewEsp = (uint16_t)uNewEsp;
5027 }
5028
5029 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5030
5031 /* Check that there is sufficient space for the stack frame. */
5032 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
5033 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
5034 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
5035 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
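        /* (10/12 bytes: EIP, CS, EFLAGS, ESP, SS and optional error code as 16-bit entries;
            18/20 bytes: the V8086 frame additionally holds ES, DS, FS and GS.
            The shift by f32BitGate doubles the entry size for 32-bit gates.) */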
5036
5037 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
5038 {
5039 if ( uNewEsp - 1 > cbLimitSS
5040 || uNewEsp < cbStackFrame)
5041 {
5042 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
5043 u8Vector, NewSS, uNewEsp, cbStackFrame));
5044 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5045 }
5046 }
5047 else
5048 {
5049 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
5050 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
5051 {
5052 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
5053 u8Vector, NewSS, uNewEsp, cbStackFrame));
5054 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
5055 }
5056 }
5057
5058 /*
5059 * Start making changes.
5060 */
5061
5062 /* Set the new CPL so that stack accesses use it. */
5063 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5064 pVCpu->iem.s.uCpl = uNewCpl;
5065
5066 /* Create the stack frame. */
5067 RTPTRUNION uStackFrame;
5068 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5069 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5070 if (rcStrict != VINF_SUCCESS)
5071 return rcStrict;
5072 void * const pvStackFrame = uStackFrame.pv;
5073 if (f32BitGate)
5074 {
5075 if (fFlags & IEM_XCPT_FLAGS_ERR)
5076 *uStackFrame.pu32++ = uErr;
5077 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5078 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5079 uStackFrame.pu32[2] = fEfl;
5080 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
5081 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
5082 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
5083 if (fEfl & X86_EFL_VM)
5084 {
5085 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
5086 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
5087 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
5088 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5089 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5090 }
5091 }
5092 else
5093 {
5094 if (fFlags & IEM_XCPT_FLAGS_ERR)
5095 *uStackFrame.pu16++ = uErr;
5096 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5097 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5098 uStackFrame.pu16[2] = fEfl;
5099 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5100 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5101 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5102 if (fEfl & X86_EFL_VM)
5103 {
5104 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5105 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5106 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5107 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5108 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5109 }
5110 }
5111 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5112 if (rcStrict != VINF_SUCCESS)
5113 return rcStrict;
5114
5115 /* Mark the selectors 'accessed' (hope this is the correct time). */
 5116         /** @todo testcase: exactly _when_ are the accessed bits set - before or
5117 * after pushing the stack frame? (Write protect the gdt + stack to
5118 * find out.) */
5119 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5120 {
5121 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5122 if (rcStrict != VINF_SUCCESS)
5123 return rcStrict;
5124 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5125 }
5126
5127 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5128 {
5129 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5130 if (rcStrict != VINF_SUCCESS)
5131 return rcStrict;
5132 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5133 }
5134
5135 /*
 5136          * Start committing the register changes (joins with the DPL=CPL branch).
5137 */
5138 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5139 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5140 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5141 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5142 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5143 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5144 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5145 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5146 * SP is loaded).
5147 * Need to check the other combinations too:
5148 * - 16-bit TSS, 32-bit handler
5149 * - 32-bit TSS, 16-bit handler */
5150 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5151 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5152 else
5153 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5154
5155 if (fEfl & X86_EFL_VM)
5156 {
5157 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5158 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5159 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5160 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5161 }
5162 }
5163 /*
5164 * Same privilege, no stack change and smaller stack frame.
5165 */
5166 else
5167 {
5168 uint64_t uNewRsp;
5169 RTPTRUNION uStackFrame;
5170 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
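        /* (6/8 bytes: EIP, CS, EFLAGS and optional error code as 16-bit entries,
            doubled by the f32BitGate shift for 32-bit gates.) */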
5171 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5172 if (rcStrict != VINF_SUCCESS)
5173 return rcStrict;
5174 void * const pvStackFrame = uStackFrame.pv;
5175
5176 if (f32BitGate)
5177 {
5178 if (fFlags & IEM_XCPT_FLAGS_ERR)
5179 *uStackFrame.pu32++ = uErr;
5180 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5181 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5182 uStackFrame.pu32[2] = fEfl;
5183 }
5184 else
5185 {
5186 if (fFlags & IEM_XCPT_FLAGS_ERR)
5187 *uStackFrame.pu16++ = uErr;
5188 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5189 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5190 uStackFrame.pu16[2] = fEfl;
5191 }
5192 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5193 if (rcStrict != VINF_SUCCESS)
5194 return rcStrict;
5195
5196 /* Mark the CS selector as 'accessed'. */
5197 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5198 {
5199 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5200 if (rcStrict != VINF_SUCCESS)
5201 return rcStrict;
5202 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5203 }
5204
5205 /*
5206 * Start committing the register changes (joins with the other branch).
5207 */
5208 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5209 }
5210
5211 /* ... register committing continues. */
5212 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5213 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5214 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5215 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5216 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5217 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5218
5219 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5220 fEfl &= ~fEflToClear;
5221 IEMMISC_SET_EFL(pVCpu, fEfl);
5222
5223 if (fFlags & IEM_XCPT_FLAGS_CR2)
5224 pVCpu->cpum.GstCtx.cr2 = uCr2;
5225
5226 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5227 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5228
5229 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5230}
5231
5232
5233/**
5234 * Implements exceptions and interrupts for long mode.
5235 *
5236 * @returns VBox strict status code.
5237 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5238 * @param cbInstr The number of bytes to offset rIP by in the return
5239 * address.
5240 * @param u8Vector The interrupt / exception vector number.
5241 * @param fFlags The flags.
5242 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5243 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5244 */
5245IEM_STATIC VBOXSTRICTRC
5246iemRaiseXcptOrIntInLongMode(PVMCPU pVCpu,
5247 uint8_t cbInstr,
5248 uint8_t u8Vector,
5249 uint32_t fFlags,
5250 uint16_t uErr,
5251 uint64_t uCr2)
5252{
5253 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5254
5255 /*
5256 * Read the IDT entry.
5257 */
5258 uint16_t offIdt = (uint16_t)u8Vector << 4;
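    /* Long mode IDT entries are 16 bytes each, hence the shift by 4; the descriptor is
       fetched below in two 8-byte reads. */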
5259 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5260 {
5261 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5262 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5263 }
5264 X86DESC64 Idte;
5265 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5266 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5267 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5268 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5269 {
5270 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5271 return rcStrict;
5272 }
5273 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5274 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5275 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5276
5277 /*
5278 * Check the descriptor type, DPL and such.
5279 * ASSUMES this is done in the same order as described for call-gate calls.
5280 */
5281 if (Idte.Gate.u1DescType)
5282 {
5283 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5284 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5285 }
5286 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5287 switch (Idte.Gate.u4Type)
5288 {
5289 case AMD64_SEL_TYPE_SYS_INT_GATE:
5290 fEflToClear |= X86_EFL_IF;
5291 break;
5292 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5293 break;
5294
5295 default:
5296 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5297 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5298 }
5299
5300 /* Check DPL against CPL if applicable. */
5301 if (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
5302 {
5303 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5304 {
5305 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5306 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5307 }
5308 }
5309
5310 /* Is it there? */
5311 if (!Idte.Gate.u1Present)
5312 {
5313 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5314 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5315 }
5316
5317 /* A null CS is bad. */
5318 RTSEL NewCS = Idte.Gate.u16Sel;
5319 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5320 {
5321 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5322 return iemRaiseGeneralProtectionFault0(pVCpu);
5323 }
5324
5325 /* Fetch the descriptor for the new CS. */
5326 IEMSELDESC DescCS;
5327 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5328 if (rcStrict != VINF_SUCCESS)
5329 {
5330 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5331 return rcStrict;
5332 }
5333
5334 /* Must be a 64-bit code segment. */
5335 if (!DescCS.Long.Gen.u1DescType)
5336 {
5337 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5338 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5339 }
5340 if ( !DescCS.Long.Gen.u1Long
5341 || DescCS.Long.Gen.u1DefBig
5342 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5343 {
5344 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5345 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5346 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5347 }
5348
5349 /* Don't allow lowering the privilege level. For non-conforming CS
5350 selectors, the CS.DPL sets the privilege level the trap/interrupt
5351 handler runs at. For conforming CS selectors, the CPL remains
5352 unchanged, but the CS.DPL must be <= CPL. */
5353 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5354 * when CPU in Ring-0. Result \#GP? */
5355 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5356 {
5357 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5358 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5359 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5360 }
5361
5362
5363 /* Make sure the selector is present. */
5364 if (!DescCS.Legacy.Gen.u1Present)
5365 {
5366 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5367 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5368 }
5369
5370 /* Check that the new RIP is canonical. */
5371 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5372 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5373 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5374 if (!IEM_IS_CANONICAL(uNewRip))
5375 {
5376 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5377 return iemRaiseGeneralProtectionFault0(pVCpu);
5378 }
5379
5380 /*
5381 * If the privilege level changes or if the IST isn't zero, we need to get
5382 * a new stack from the TSS.
5383 */
5384 uint64_t uNewRsp;
5385 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5386 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5387 if ( uNewCpl != pVCpu->iem.s.uCpl
5388 || Idte.Gate.u3IST != 0)
5389 {
5390 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5391 if (rcStrict != VINF_SUCCESS)
5392 return rcStrict;
5393 }
5394 else
5395 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5396 uNewRsp &= ~(uint64_t)0xf;
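    /* Long-mode event delivery aligns the new stack pointer down to a 16-byte boundary
       before the frame is pushed. */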
5397
5398 /*
5399 * Calc the flag image to push.
5400 */
5401 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5402 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5403 fEfl &= ~X86_EFL_RF;
5404 else
5405 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5406
5407 /*
5408 * Start making changes.
5409 */
5410 /* Set the new CPL so that stack accesses use it. */
5411 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5412 pVCpu->iem.s.uCpl = uNewCpl;
5413
5414 /* Create the stack frame. */
5415 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
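    /* Five qwords: RIP, CS, RFLAGS, RSP and SS, plus one more when an error code is pushed. */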
5416 RTPTRUNION uStackFrame;
5417 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5418 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5419 if (rcStrict != VINF_SUCCESS)
5420 return rcStrict;
5421 void * const pvStackFrame = uStackFrame.pv;
5422
5423 if (fFlags & IEM_XCPT_FLAGS_ERR)
5424 *uStackFrame.pu64++ = uErr;
5425 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5426 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5427 uStackFrame.pu64[2] = fEfl;
5428 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5429 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5430 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5431 if (rcStrict != VINF_SUCCESS)
5432 return rcStrict;
5433
 5434     /* Mark the CS selector 'accessed' (hope this is the correct time). */
 5435     /** @todo testcase: exactly _when_ are the accessed bits set - before or
5436 * after pushing the stack frame? (Write protect the gdt + stack to
5437 * find out.) */
5438 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5439 {
5440 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5441 if (rcStrict != VINF_SUCCESS)
5442 return rcStrict;
5443 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5444 }
5445
5446 /*
 5447      * Start committing the register changes.
5448 */
5449 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5450 * hidden registers when interrupting 32-bit or 16-bit code! */
5451 if (uNewCpl != uOldCpl)
5452 {
5453 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5454 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5455 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5456 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5457 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5458 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5459 }
5460 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5461 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5462 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5463 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5464 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5465 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5466 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5467 pVCpu->cpum.GstCtx.rip = uNewRip;
5468
5469 fEfl &= ~fEflToClear;
5470 IEMMISC_SET_EFL(pVCpu, fEfl);
5471
5472 if (fFlags & IEM_XCPT_FLAGS_CR2)
5473 pVCpu->cpum.GstCtx.cr2 = uCr2;
5474
5475 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5476 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5477
5478 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5479}
5480
5481
5482/**
5483 * Implements exceptions and interrupts.
5484 *
 5485  * All exceptions and interrupts go thru this function!
5486 *
5487 * @returns VBox strict status code.
5488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5489 * @param cbInstr The number of bytes to offset rIP by in the return
5490 * address.
5491 * @param u8Vector The interrupt / exception vector number.
5492 * @param fFlags The flags.
5493 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5494 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5495 */
5496DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5497iemRaiseXcptOrInt(PVMCPU pVCpu,
5498 uint8_t cbInstr,
5499 uint8_t u8Vector,
5500 uint32_t fFlags,
5501 uint16_t uErr,
5502 uint64_t uCr2)
5503{
5504 /*
5505 * Get all the state that we might need here.
5506 */
5507 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5508 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5509
5510#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5511 /*
5512 * Flush prefetch buffer
5513 */
5514 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5515#endif
5516
5517 /*
 5518      * Perform the V8086 IOPL check; on failure, upgrade the software interrupt to a #GP(0) without nesting.
5519 */
5520 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5521 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5522 && (fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_BP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5523 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5524 {
5525 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5526 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5527 u8Vector = X86_XCPT_GP;
5528 uErr = 0;
5529 }
5530#ifdef DBGFTRACE_ENABLED
5531 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5532 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5533 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5534#endif
5535
5536#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5537 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5538 {
5539 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5540 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5541 return rcStrict0;
5542 }
5543#endif
5544
5545#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5546 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5547 {
5548 /*
5549 * If the event is being injected as part of VMRUN, it isn't subject to event
5550 * intercepts in the nested-guest. However, secondary exceptions that occur
5551 * during injection of any event -are- subject to exception intercepts.
5552 *
5553 * See AMD spec. 15.20 "Event Injection".
5554 */
5555 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5556 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5557 else
5558 {
5559 /*
5560 * Check and handle if the event being raised is intercepted.
5561 */
5562 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5563 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5564 return rcStrict0;
5565 }
5566 }
5567#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
5568
5569 /*
5570 * Do recursion accounting.
5571 */
5572 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5573 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5574 if (pVCpu->iem.s.cXcptRecursions == 0)
5575 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5576 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5577 else
5578 {
5579 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5580 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5581 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5582
5583 if (pVCpu->iem.s.cXcptRecursions >= 4)
5584 {
5585#ifdef DEBUG_bird
5586 AssertFailed();
5587#endif
5588 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5589 }
5590
5591 /*
5592 * Evaluate the sequence of recurring events.
5593 */
5594 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5595 NULL /* pXcptRaiseInfo */);
5596 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5597 { /* likely */ }
5598 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5599 {
5600 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5601 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5602 u8Vector = X86_XCPT_DF;
5603 uErr = 0;
5604 /** @todo NSTVMX: Do we need to do something here for VMX? */
5605 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5606 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5607 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5608 }
5609 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5610 {
5611 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5612 return iemInitiateCpuShutdown(pVCpu);
5613 }
5614 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5615 {
5616 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5617 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5618 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5619 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5620 return VERR_EM_GUEST_CPU_HANG;
5621 }
5622 else
5623 {
5624 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5625 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5626 return VERR_IEM_IPE_9;
5627 }
5628
5629 /*
 5630          * The 'EXT' bit is set when an exception occurs during delivery of an external
 5631          * event (such as an interrupt or an earlier exception)[1]. The privileged software
 5632          * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
 5633          * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set[3].
5634 *
5635 * [1] - Intel spec. 6.13 "Error Code"
5636 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5637 * [3] - Intel Instruction reference for INT n.
5638 */
5639 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5640 && (fFlags & IEM_XCPT_FLAGS_ERR)
5641 && u8Vector != X86_XCPT_PF
5642 && u8Vector != X86_XCPT_DF)
5643 {
5644 uErr |= X86_TRAP_ERR_EXTERNAL;
5645 }
5646 }
5647
5648 pVCpu->iem.s.cXcptRecursions++;
5649 pVCpu->iem.s.uCurXcpt = u8Vector;
5650 pVCpu->iem.s.fCurXcpt = fFlags;
5651 pVCpu->iem.s.uCurXcptErr = uErr;
5652 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5653
5654 /*
5655 * Extensive logging.
5656 */
5657#if defined(LOG_ENABLED) && defined(IN_RING3)
5658 if (LogIs3Enabled())
5659 {
5660 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5661 PVM pVM = pVCpu->CTX_SUFF(pVM);
5662 char szRegs[4096];
5663 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5664 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5665 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5666 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5667 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5668 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5669 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5670 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5671 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5672 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5673 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5674 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5675 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5676 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5677 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5678 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5679 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5680 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5681 " efer=%016VR{efer}\n"
5682 " pat=%016VR{pat}\n"
5683 " sf_mask=%016VR{sf_mask}\n"
5684 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5685 " lstar=%016VR{lstar}\n"
5686 " star=%016VR{star} cstar=%016VR{cstar}\n"
5687 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5688 );
5689
5690 char szInstr[256];
5691 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5692 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5693 szInstr, sizeof(szInstr), NULL);
5694 Log3(("%s%s\n", szRegs, szInstr));
5695 }
5696#endif /* LOG_ENABLED */
5697
5698 /*
5699 * Call the mode specific worker function.
5700 */
5701 VBOXSTRICTRC rcStrict;
5702 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5703 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5704 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5705 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5706 else
5707 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5708
5709 /* Flush the prefetch buffer. */
5710#ifdef IEM_WITH_CODE_TLB
5711 pVCpu->iem.s.pbInstrBuf = NULL;
5712#else
5713 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5714#endif
5715
5716 /*
5717 * Unwind.
5718 */
5719 pVCpu->iem.s.cXcptRecursions--;
5720 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5721 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5722 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5723 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5724 pVCpu->iem.s.cXcptRecursions + 1));
5725 return rcStrict;
5726}
5727
5728#ifdef IEM_WITH_SETJMP
5729/**
5730 * See iemRaiseXcptOrInt. Will not return.
5731 */
5732IEM_STATIC DECL_NO_RETURN(void)
5733iemRaiseXcptOrIntJmp(PVMCPU pVCpu,
5734 uint8_t cbInstr,
5735 uint8_t u8Vector,
5736 uint32_t fFlags,
5737 uint16_t uErr,
5738 uint64_t uCr2)
5739{
5740 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5741 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5742}
5743#endif
5744
5745
5746/** \#DE - 00. */
5747DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPU pVCpu)
5748{
5749 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5750}
5751
5752
5753/** \#DB - 01.
 5754 * @note This automatically clears DR7.GD. */
5755DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPU pVCpu)
5756{
5757 /** @todo set/clear RF. */
5758 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5759 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5760}
5761
5762
5763/** \#BR - 05. */
5764DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPU pVCpu)
5765{
5766 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5767}
5768
5769
5770/** \#UD - 06. */
5771DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPU pVCpu)
5772{
5773 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5774}
5775
5776
5777/** \#NM - 07. */
5778DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPU pVCpu)
5779{
5780 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5781}
5782
5783
5784/** \#TS(err) - 0a. */
5785DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPU pVCpu, uint16_t uErr)
5786{
5787 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5788}
5789
5790
5791/** \#TS(tr) - 0a. */
5792DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPU pVCpu)
5793{
5794 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5795 pVCpu->cpum.GstCtx.tr.Sel, 0);
5796}
5797
5798
5799/** \#TS(0) - 0a. */
5800DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPU pVCpu)
5801{
5802 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5803 0, 0);
5804}
5805
5806
5807/** \#TS(err) - 0a. */
5808DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPU pVCpu, uint16_t uSel)
5809{
5810 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5811 uSel & X86_SEL_MASK_OFF_RPL, 0);
5812}
5813
5814
5815/** \#NP(err) - 0b. */
5816DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5817{
5818 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5819}
5820
5821
5822/** \#NP(sel) - 0b. */
5823DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5824{
5825 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5826 uSel & ~X86_SEL_RPL, 0);
5827}
5828
5829
5830/** \#SS(seg) - 0c. */
5831DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPU pVCpu, uint16_t uSel)
5832{
5833 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5834 uSel & ~X86_SEL_RPL, 0);
5835}
5836
5837
5838/** \#SS(err) - 0c. */
5839DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPU pVCpu, uint16_t uErr)
5840{
5841 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5842}
5843
5844
5845/** \#GP(n) - 0d. */
5846DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPU pVCpu, uint16_t uErr)
5847{
5848 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5849}
5850
5851
5852/** \#GP(0) - 0d. */
5853DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPU pVCpu)
5854{
5855 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5856}
5857
5858#ifdef IEM_WITH_SETJMP
5859/** \#GP(0) - 0d. */
5860DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPU pVCpu)
5861{
5862 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5863}
5864#endif
5865
5866
5867/** \#GP(sel) - 0d. */
5868DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPU pVCpu, RTSEL Sel)
5869{
5870 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5871 Sel & ~X86_SEL_RPL, 0);
5872}
5873
5874
5875/** \#GP(0) - 0d. */
5876DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPU pVCpu)
5877{
5878 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5879}
5880
5881
5882/** \#GP(sel) - 0d. */
5883DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5884{
5885 NOREF(iSegReg); NOREF(fAccess);
5886 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5887 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5888}
5889
5890#ifdef IEM_WITH_SETJMP
5891/** \#GP(sel) - 0d, longjmp. */
5892DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5893{
5894 NOREF(iSegReg); NOREF(fAccess);
5895 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5896 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5897}
5898#endif
5899
5900/** \#GP(sel) - 0d. */
5901DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPU pVCpu, RTSEL Sel)
5902{
5903 NOREF(Sel);
5904 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5905}
5906
5907#ifdef IEM_WITH_SETJMP
5908/** \#GP(sel) - 0d, longjmp. */
5909DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPU pVCpu, RTSEL Sel)
5910{
5911 NOREF(Sel);
5912 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5913}
5914#endif
5915
5916
5917/** \#GP(sel) - 0d. */
5918DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPU pVCpu, uint32_t iSegReg, uint32_t fAccess)
5919{
5920 NOREF(iSegReg); NOREF(fAccess);
5921 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5922}
5923
5924#ifdef IEM_WITH_SETJMP
5925/** \#GP(sel) - 0d, longjmp. */
5926DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPU pVCpu, uint32_t iSegReg,
5927 uint32_t fAccess)
5928{
5929 NOREF(iSegReg); NOREF(fAccess);
5930 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5931}
5932#endif
5933
5934
5935/** \#PF(n) - 0e. */
5936DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5937{
5938 uint16_t uErr;
5939 switch (rc)
5940 {
5941 case VERR_PAGE_NOT_PRESENT:
5942 case VERR_PAGE_TABLE_NOT_PRESENT:
5943 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5944 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5945 uErr = 0;
5946 break;
5947
5948 default:
5949 AssertMsgFailed(("%Rrc\n", rc));
5950 RT_FALL_THRU();
5951 case VERR_ACCESS_DENIED:
5952 uErr = X86_TRAP_PF_P;
5953 break;
5954
5955 /** @todo reserved */
5956 }
5957
5958 if (pVCpu->iem.s.uCpl == 3)
5959 uErr |= X86_TRAP_PF_US;
5960
5961 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5962 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5963 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5964 uErr |= X86_TRAP_PF_ID;
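    /* The instruction-fetch (I/D) bit is only reported when NX paging is in effect,
       i.e. CR4.PAE together with EFER.NXE, hence the extra checks above. */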
5965
 5966#if 0 /* This is so much nonsense, really. Why was it done like that? */
 5967     /* Note! RW access callers reporting a WRITE protection fault will clear
5968 the READ flag before calling. So, read-modify-write accesses (RW)
5969 can safely be reported as READ faults. */
5970 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5971 uErr |= X86_TRAP_PF_RW;
5972#else
5973 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5974 {
5975 if (!(fAccess & IEM_ACCESS_TYPE_READ))
5976 uErr |= X86_TRAP_PF_RW;
5977 }
5978#endif
5979
5980 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5981 uErr, GCPtrWhere);
5982}
5983
5984#ifdef IEM_WITH_SETJMP
5985/** \#PF(n) - 0e, longjmp. */
5986IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPU pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5987{
5988 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5989}
5990#endif
5991
5992
5993/** \#MF(0) - 10. */
5994DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPU pVCpu)
5995{
5996 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5997}
5998
5999
6000/** \#AC(0) - 11. */
6001DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPU pVCpu)
6002{
6003 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6004}
6005
6006
6007/**
6008 * Macro for calling iemCImplRaiseDivideError().
6009 *
6010 * This enables us to add/remove arguments and force different levels of
6011 * inlining as we wish.
6012 *
6013 * @return Strict VBox status code.
6014 */
6015#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
6016IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
6017{
6018 NOREF(cbInstr);
6019 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6020}
6021
6022
6023/**
6024 * Macro for calling iemCImplRaiseInvalidLockPrefix().
6025 *
6026 * This enables us to add/remove arguments and force different levels of
6027 * inlining as we wish.
6028 *
6029 * @return Strict VBox status code.
6030 */
6031#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
6032IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
6033{
6034 NOREF(cbInstr);
6035 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6036}
6037
6038
6039/**
6040 * Macro for calling iemCImplRaiseInvalidOpcode().
6041 *
6042 * This enables us to add/remove arguments and force different levels of
6043 * inlining as we wish.
6044 *
6045 * @return Strict VBox status code.
6046 */
6047#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6048IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6049{
6050 NOREF(cbInstr);
6051 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6052}
6053
6054
6055/** @} */
6056
6057
6058/*
6059 *
6060 * Helper routines.
6061 * Helper routines.
6062 * Helper routines.
6063 *
6064 */
6065
6066/**
6067 * Recalculates the effective operand size.
6068 *
6069 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6070 */
6071IEM_STATIC void iemRecalEffOpSize(PVMCPU pVCpu)
6072{
6073 switch (pVCpu->iem.s.enmCpuMode)
6074 {
6075 case IEMMODE_16BIT:
6076 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6077 break;
6078 case IEMMODE_32BIT:
6079 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6080 break;
6081 case IEMMODE_64BIT:
6082 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6083 {
6084 case 0:
6085 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6086 break;
6087 case IEM_OP_PRF_SIZE_OP:
6088 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6089 break;
6090 case IEM_OP_PRF_SIZE_REX_W:
6091 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6092 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6093 break;
6094 }
6095 break;
6096 default:
6097 AssertFailed();
6098 }
6099}
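
/* Illustrative note (editor's sketch, not part of the original sources): in
 * 64-bit mode REX.W takes precedence over the 0x66 operand-size prefix, e.g.
 *     fPrefixes = IEM_OP_PRF_SIZE_OP                          -> IEMMODE_16BIT
 *     fPrefixes = IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_SIZE_REX_W  -> IEMMODE_64BIT
 *     fPrefixes = 0                                           -> enmDefOpSize (normally IEMMODE_32BIT,
 *                                                                see iemRecalEffOpSize64Default below)
 * whereas in 16-bit and 32-bit mode 0x66 simply toggles between IEMMODE_16BIT
 * and IEMMODE_32BIT.
 */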
6100
6101
6102/**
6103 * Sets the default operand size to 64-bit and recalculates the effective
6104 * operand size.
6105 *
6106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6107 */
6108IEM_STATIC void iemRecalEffOpSize64Default(PVMCPU pVCpu)
6109{
6110 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6111 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6112 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6113 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6114 else
6115 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6116}
6117
6118
6119/*
6120 *
6121 * Common opcode decoders.
6122 * Common opcode decoders.
6123 * Common opcode decoders.
6124 *
6125 */
6126//#include <iprt/mem.h>
6127
6128/**
6129 * Used to add extra details about a stub case.
6130 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6131 */
6132IEM_STATIC void iemOpStubMsg2(PVMCPU pVCpu)
6133{
6134#if defined(LOG_ENABLED) && defined(IN_RING3)
6135 PVM pVM = pVCpu->CTX_SUFF(pVM);
6136 char szRegs[4096];
6137 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6138 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6139 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6140 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6141 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6142 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6143 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6144 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6145 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6146 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6147 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6148 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6149 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6150 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6151 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6152 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6153 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6154 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6155 " efer=%016VR{efer}\n"
6156 " pat=%016VR{pat}\n"
6157 " sf_mask=%016VR{sf_mask}\n"
6158 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6159 " lstar=%016VR{lstar}\n"
6160 " star=%016VR{star} cstar=%016VR{cstar}\n"
6161 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6162 );
6163
6164 char szInstr[256];
6165 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6166 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6167 szInstr, sizeof(szInstr), NULL);
6168
6169 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6170#else
6171 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
6172#endif
6173}
6174
6175/**
6176 * Complains about a stub.
6177 *
6178 * There are two versions of this macro: one for daily use and one for use when
6179 * working on IEM.
6180 */
6181#if 0
6182# define IEMOP_BITCH_ABOUT_STUB() \
6183 do { \
6184 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6185 iemOpStubMsg2(pVCpu); \
6186 RTAssertPanic(); \
6187 } while (0)
6188#else
6189# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6190#endif
6191
6192/** Stubs an opcode. */
6193#define FNIEMOP_STUB(a_Name) \
6194 FNIEMOP_DEF(a_Name) \
6195 { \
6196 RT_NOREF_PV(pVCpu); \
6197 IEMOP_BITCH_ABOUT_STUB(); \
6198 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6199 } \
6200 typedef int ignore_semicolon
6201
6202/** Stubs an opcode. */
6203#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6204 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6205 { \
6206 RT_NOREF_PV(pVCpu); \
6207 RT_NOREF_PV(a_Name0); \
6208 IEMOP_BITCH_ABOUT_STUB(); \
6209 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6210 } \
6211 typedef int ignore_semicolon
6212
6213/** Stubs an opcode which currently should raise \#UD. */
6214#define FNIEMOP_UD_STUB(a_Name) \
6215 FNIEMOP_DEF(a_Name) \
6216 { \
6217 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6218 return IEMOP_RAISE_INVALID_OPCODE(); \
6219 } \
6220 typedef int ignore_semicolon
6221
6222/** Stubs an opcode which currently should raise \#UD. */
6223#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6224 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6225 { \
6226 RT_NOREF_PV(pVCpu); \
6227 RT_NOREF_PV(a_Name0); \
6228 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6229 return IEMOP_RAISE_INVALID_OPCODE(); \
6230 } \
6231 typedef int ignore_semicolon
6232
6233
6234
6235/** @name Register Access.
6236 * @{
6237 */
6238
6239/**
6240 * Gets a reference (pointer) to the specified hidden segment register.
6241 *
6242 * @returns Hidden register reference.
6243 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6244 * @param iSegReg The segment register.
6245 */
6246IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPU pVCpu, uint8_t iSegReg)
6247{
6248 Assert(iSegReg < X86_SREG_COUNT);
6249 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6250 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6251
6252#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6253 if (RT_LIKELY(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg)))
6254 { /* likely */ }
6255 else
6256 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6257#else
6258 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6259#endif
6260 return pSReg;
6261}
6262
6263
6264/**
6265 * Ensures that the given hidden segment register is up to date.
6266 *
6267 * @returns Hidden register reference.
6268 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6269 * @param pSReg The segment register.
6270 */
6271IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPU pVCpu, PCPUMSELREG pSReg)
6272{
6273#ifdef VBOX_WITH_RAW_MODE_NOT_R0
6274 if (!CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg))
6275 CPUMGuestLazyLoadHiddenSelectorReg(pVCpu, pSReg);
6276#else
6277 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6278 NOREF(pVCpu);
6279#endif
6280 return pSReg;
6281}
6282
6283
6284/**
6285 * Gets a reference (pointer) to the specified segment register (the selector
6286 * value).
6287 *
6288 * @returns Pointer to the selector variable.
6289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6290 * @param iSegReg The segment register.
6291 */
6292DECLINLINE(uint16_t *) iemSRegRef(PVMCPU pVCpu, uint8_t iSegReg)
6293{
6294 Assert(iSegReg < X86_SREG_COUNT);
6295 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6296 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6297}
6298
6299
6300/**
6301 * Fetches the selector value of a segment register.
6302 *
6303 * @returns The selector value.
6304 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6305 * @param iSegReg The segment register.
6306 */
6307DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPU pVCpu, uint8_t iSegReg)
6308{
6309 Assert(iSegReg < X86_SREG_COUNT);
6310 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6311 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6312}
6313
6314
6315/**
6316 * Fetches the base address value of a segment register.
6317 *
6318 * @returns The base address value.
6319 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6320 * @param iSegReg The segment register.
6321 */
6322DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg)
6323{
6324 Assert(iSegReg < X86_SREG_COUNT);
6325 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6326 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6327}
6328
6329
6330/**
6331 * Gets a reference (pointer) to the specified general purpose register.
6332 *
6333 * @returns Register reference.
6334 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6335 * @param iReg The general purpose register.
6336 */
6337DECLINLINE(void *) iemGRegRef(PVMCPU pVCpu, uint8_t iReg)
6338{
6339 Assert(iReg < 16);
6340 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6341}
6342
6343
6344/**
6345 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6346 *
6347 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6348 *
6349 * @returns Register reference.
6350 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6351 * @param iReg The register.
6352 */
6353DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPU pVCpu, uint8_t iReg)
6354{
6355 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6356 {
6357 Assert(iReg < 16);
6358 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6359 }
6360 /* high 8-bit register. */
6361 Assert(iReg < 8);
6362 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6363}
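
/* Illustrative note (editor's sketch, not part of the original sources):
 * without any REX prefix, encodings 4 thru 7 select the legacy high byte
 * registers, so the reference points into the low dword of RAX/RCX/RDX/RBX:
 *     iemGRegRefU8(pVCpu, 4) -> &aGRegs[0].bHi   (AH)
 *     iemGRegRefU8(pVCpu, 7) -> &aGRegs[3].bHi   (BH)
 * With a REX prefix present the same encodings select SPL/BPL/SIL/DIL, i.e.
 * &aGRegs[4..7].u8, which is why iemGRegRef cannot be used directly here.
 */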
6364
6365
6366/**
6367 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6368 *
6369 * @returns Register reference.
6370 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6371 * @param iReg The register.
6372 */
6373DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPU pVCpu, uint8_t iReg)
6374{
6375 Assert(iReg < 16);
6376 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6377}
6378
6379
6380/**
6381 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6382 *
6383 * @returns Register reference.
6384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6385 * @param iReg The register.
6386 */
6387DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPU pVCpu, uint8_t iReg)
6388{
6389 Assert(iReg < 16);
6390 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6391}
6392
6393
6394/**
6395 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6396 *
6397 * @returns Register reference.
6398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6399 * @param iReg The register.
6400 */
6401DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPU pVCpu, uint8_t iReg)
6402{
6403 Assert(iReg < 16);
6404 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6405}
6406
6407
6408/**
6409 * Gets a reference (pointer) to the specified segment register's base address.
6410 *
6411 * @returns Segment register base address reference.
6412 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6413 * @param iSegReg The segment selector.
6414 */
6415DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPU pVCpu, uint8_t iSegReg)
6416{
6417 Assert(iSegReg < X86_SREG_COUNT);
6418 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6419 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6420}
6421
6422
6423/**
6424 * Fetches the value of an 8-bit general purpose register.
6425 *
6426 * @returns The register value.
6427 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6428 * @param iReg The register.
6429 */
6430DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPU pVCpu, uint8_t iReg)
6431{
6432 return *iemGRegRefU8(pVCpu, iReg);
6433}
6434
6435
6436/**
6437 * Fetches the value of a 16-bit general purpose register.
6438 *
6439 * @returns The register value.
6440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6441 * @param iReg The register.
6442 */
6443DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPU pVCpu, uint8_t iReg)
6444{
6445 Assert(iReg < 16);
6446 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6447}
6448
6449
6450/**
6451 * Fetches the value of a 32-bit general purpose register.
6452 *
6453 * @returns The register value.
6454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6455 * @param iReg The register.
6456 */
6457DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPU pVCpu, uint8_t iReg)
6458{
6459 Assert(iReg < 16);
6460 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6461}
6462
6463
6464/**
6465 * Fetches the value of a 64-bit general purpose register.
6466 *
6467 * @returns The register value.
6468 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6469 * @param iReg The register.
6470 */
6471DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPU pVCpu, uint8_t iReg)
6472{
6473 Assert(iReg < 16);
6474 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6475}
6476
6477
6478/**
6479 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6480 *
6481 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6482 * segment limit.
6483 *
6484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6485 * @param offNextInstr The offset of the next instruction.
6486 */
6487IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPU pVCpu, int8_t offNextInstr)
6488{
6489 switch (pVCpu->iem.s.enmEffOpSize)
6490 {
6491 case IEMMODE_16BIT:
6492 {
6493 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6494 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6495 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6496 return iemRaiseGeneralProtectionFault0(pVCpu);
6497 pVCpu->cpum.GstCtx.rip = uNewIp;
6498 break;
6499 }
6500
6501 case IEMMODE_32BIT:
6502 {
6503 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6504 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6505
6506 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6507 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6508 return iemRaiseGeneralProtectionFault0(pVCpu);
6509 pVCpu->cpum.GstCtx.rip = uNewEip;
6510 break;
6511 }
6512
6513 case IEMMODE_64BIT:
6514 {
6515 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6516
6517 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6518 if (!IEM_IS_CANONICAL(uNewRip))
6519 return iemRaiseGeneralProtectionFault0(pVCpu);
6520 pVCpu->cpum.GstCtx.rip = uNewRip;
6521 break;
6522 }
6523
6524 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6525 }
6526
6527 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6528
6529#ifndef IEM_WITH_CODE_TLB
6530 /* Flush the prefetch buffer. */
6531 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6532#endif
6533
6534 return VINF_SUCCESS;
6535}
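
/* Illustrative note (editor's sketch, not part of the original sources): the
 * 16-bit case computes the target in a uint16_t on purpose, so IP wraps at
 * 64KB before the limit check.  E.g. ip=0xfffe with a 2 byte instruction and
 * offNextInstr=+4 yields uNewIp=0x0004, which is then validated against
 * cs.u32Limit rather than faulting on the wrap itself.
 */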
6536
6537
6538/**
6539 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6540 *
6541 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6542 * segment limit.
6543 *
6544 * @returns Strict VBox status code.
6545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6546 * @param offNextInstr The offset of the next instruction.
6547 */
6548IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPU pVCpu, int16_t offNextInstr)
6549{
6550 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6551
6552 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6553 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6554 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6555 return iemRaiseGeneralProtectionFault0(pVCpu);
6556 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6557 pVCpu->cpum.GstCtx.rip = uNewIp;
6558 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6559
6560#ifndef IEM_WITH_CODE_TLB
6561 /* Flush the prefetch buffer. */
6562 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6563#endif
6564
6565 return VINF_SUCCESS;
6566}
6567
6568
6569/**
6570 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6571 *
6572 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6573 * segment limit.
6574 *
6575 * @returns Strict VBox status code.
6576 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6577 * @param offNextInstr The offset of the next instruction.
6578 */
6579IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPU pVCpu, int32_t offNextInstr)
6580{
6581 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6582
6583 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6584 {
6585 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6586
6587 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6588 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6589 return iemRaiseGeneralProtectionFault0(pVCpu);
6590 pVCpu->cpum.GstCtx.rip = uNewEip;
6591 }
6592 else
6593 {
6594 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6595
6596 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6597 if (!IEM_IS_CANONICAL(uNewRip))
6598 return iemRaiseGeneralProtectionFault0(pVCpu);
6599 pVCpu->cpum.GstCtx.rip = uNewRip;
6600 }
6601 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6602
6603#ifndef IEM_WITH_CODE_TLB
6604 /* Flush the prefetch buffer. */
6605 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6606#endif
6607
6608 return VINF_SUCCESS;
6609}
6610
6611
6612/**
6613 * Performs a near jump to the specified address.
6614 *
6615 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6616 * segment limit.
6617 *
6618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6619 * @param uNewRip The new RIP value.
6620 */
6621IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPU pVCpu, uint64_t uNewRip)
6622{
6623 switch (pVCpu->iem.s.enmEffOpSize)
6624 {
6625 case IEMMODE_16BIT:
6626 {
6627 Assert(uNewRip <= UINT16_MAX);
6628 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6629 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6630 return iemRaiseGeneralProtectionFault0(pVCpu);
6631 /** @todo Test 16-bit jump in 64-bit mode. */
6632 pVCpu->cpum.GstCtx.rip = uNewRip;
6633 break;
6634 }
6635
6636 case IEMMODE_32BIT:
6637 {
6638 Assert(uNewRip <= UINT32_MAX);
6639 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6640 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6641
6642 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6643 return iemRaiseGeneralProtectionFault0(pVCpu);
6644 pVCpu->cpum.GstCtx.rip = uNewRip;
6645 break;
6646 }
6647
6648 case IEMMODE_64BIT:
6649 {
6650 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6651
6652 if (!IEM_IS_CANONICAL(uNewRip))
6653 return iemRaiseGeneralProtectionFault0(pVCpu);
6654 pVCpu->cpum.GstCtx.rip = uNewRip;
6655 break;
6656 }
6657
6658 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6659 }
6660
6661 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6662
6663#ifndef IEM_WITH_CODE_TLB
6664 /* Flush the prefetch buffer. */
6665 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6666#endif
6667
6668 return VINF_SUCCESS;
6669}
6670
6671
6672/**
6673 * Get the address of the top of the stack.
6674 *
6675 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6676 */
6677DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6678{
6679 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6680 return pVCpu->cpum.GstCtx.rsp;
6681 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6682 return pVCpu->cpum.GstCtx.esp;
6683 return pVCpu->cpum.GstCtx.sp;
6684}
6685
6686
6687/**
6688 * Updates the RIP/EIP/IP to point to the next instruction.
6689 *
6690 * This function leaves the EFLAGS.RF flag alone.
6691 *
6692 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6693 * @param cbInstr The number of bytes to add.
6694 */
6695IEM_STATIC void iemRegAddToRipKeepRF(PVMCPU pVCpu, uint8_t cbInstr)
6696{
6697 switch (pVCpu->iem.s.enmCpuMode)
6698 {
6699 case IEMMODE_16BIT:
6700 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6701 pVCpu->cpum.GstCtx.eip += cbInstr;
6702 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6703 break;
6704
6705 case IEMMODE_32BIT:
6706 pVCpu->cpum.GstCtx.eip += cbInstr;
6707 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6708 break;
6709
6710 case IEMMODE_64BIT:
6711 pVCpu->cpum.GstCtx.rip += cbInstr;
6712 break;
6713 default: AssertFailed();
6714 }
6715}
6716
6717
6718#if 0
6719/**
6720 * Updates the RIP/EIP/IP to point to the next instruction.
6721 *
6722 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6723 */
6724IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPU pVCpu)
6725{
6726 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6727}
6728#endif
6729
6730
6731
6732/**
6733 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6734 *
6735 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6736 * @param cbInstr The number of bytes to add.
6737 */
6738IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPU pVCpu, uint8_t cbInstr)
6739{
6740 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6741
6742 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6743#if ARCH_BITS >= 64
6744 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6745 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6746 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6747#else
6748 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6749 pVCpu->cpum.GstCtx.rip += cbInstr;
6750 else
6751 pVCpu->cpum.GstCtx.eip += cbInstr;
6752#endif
6753}
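
/* Illustrative note (editor's sketch, not part of the original sources): the
 * lookup table replaces the mode branch on 64-bit hosts.  E.g. with
 * enmCpuMode=IEMMODE_32BIT, rip=0xfffffffe and cbInstr=4 the update is
 * (0xfffffffe + 4) & 0xffffffff = 0x00000002, while IEMMODE_64BIT selects
 * UINT64_MAX and thus keeps the full 64-bit RIP.
 */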
6754
6755
6756/**
6757 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6758 *
6759 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6760 */
6761IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPU pVCpu)
6762{
6763 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6764}
6765
6766
6767/**
6768 * Adds to the stack pointer.
6769 *
6770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6771 * @param cbToAdd The number of bytes to add (8-bit!).
6772 */
6773DECLINLINE(void) iemRegAddToRsp(PVMCPU pVCpu, uint8_t cbToAdd)
6774{
6775 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6776 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6777 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6778 pVCpu->cpum.GstCtx.esp += cbToAdd;
6779 else
6780 pVCpu->cpum.GstCtx.sp += cbToAdd;
6781}
6782
6783
6784/**
6785 * Subtracts from the stack pointer.
6786 *
6787 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6788 * @param cbToSub The number of bytes to subtract (8-bit!).
6789 */
6790DECLINLINE(void) iemRegSubFromRsp(PVMCPU pVCpu, uint8_t cbToSub)
6791{
6792 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6793 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6794 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6795 pVCpu->cpum.GstCtx.esp -= cbToSub;
6796 else
6797 pVCpu->cpum.GstCtx.sp -= cbToSub;
6798}
6799
6800
6801/**
6802 * Adds to the temporary stack pointer.
6803 *
6804 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6805 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6806 * @param cbToAdd The number of bytes to add (16-bit).
6807 */
6808DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6809{
6810 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6811 pTmpRsp->u += cbToAdd;
6812 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6813 pTmpRsp->DWords.dw0 += cbToAdd;
6814 else
6815 pTmpRsp->Words.w0 += cbToAdd;
6816}
6817
6818
6819/**
6820 * Subtracts from the temporary stack pointer.
6821 *
6822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6823 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6824 * @param cbToSub The number of bytes to subtract.
6825 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
6826 * expecting that.
6827 */
6828DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6829{
6830 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6831 pTmpRsp->u -= cbToSub;
6832 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6833 pTmpRsp->DWords.dw0 -= cbToSub;
6834 else
6835 pTmpRsp->Words.w0 -= cbToSub;
6836}
6837
6838
6839/**
6840 * Calculates the effective stack address for a push of the specified size as
6841 * well as the new RSP value (upper bits may be masked).
6842 *
6843 * @returns Effective stack address for the push.
6844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6845 * @param cbItem The size of the stack item to push.
6846 * @param puNewRsp Where to return the new RSP value.
6847 */
6848DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6849{
6850 RTUINT64U uTmpRsp;
6851 RTGCPTR GCPtrTop;
6852 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6853
6854 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6855 GCPtrTop = uTmpRsp.u -= cbItem;
6856 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6857 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6858 else
6859 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6860 *puNewRsp = uTmpRsp.u;
6861 return GCPtrTop;
6862}
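
/* Illustrative note (editor's sketch, not part of the original sources): only
 * the portion of RSP addressed by the current stack width is decremented.
 * E.g. pushing a word on a 16-bit stack (SS.B=0, not 64-bit mode) with
 * rsp=0x00010000 returns GCPtrTop=0xfffe and *puNewRsp=0x0001fffe; the upper
 * bits are preserved and only SP wraps.
 */
#if 0 /* usage sketch only; pVCpu as in the callers of this helper */
uint64_t uNewRsp;
RTGCPTR  GCPtrPushTop = iemRegGetRspForPush(pVCpu, 2 /* cbItem: word */, &uNewRsp);
#endif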
6863
6864
6865/**
6866 * Gets the current stack pointer and calculates the value after a pop of the
6867 * specified size.
6868 *
6869 * @returns Current stack pointer.
6870 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6871 * @param cbItem The size of the stack item to pop.
6872 * @param puNewRsp Where to return the new RSP value.
6873 */
6874DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6875{
6876 RTUINT64U uTmpRsp;
6877 RTGCPTR GCPtrTop;
6878 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6879
6880 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6881 {
6882 GCPtrTop = uTmpRsp.u;
6883 uTmpRsp.u += cbItem;
6884 }
6885 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6886 {
6887 GCPtrTop = uTmpRsp.DWords.dw0;
6888 uTmpRsp.DWords.dw0 += cbItem;
6889 }
6890 else
6891 {
6892 GCPtrTop = uTmpRsp.Words.w0;
6893 uTmpRsp.Words.w0 += cbItem;
6894 }
6895 *puNewRsp = uTmpRsp.u;
6896 return GCPtrTop;
6897}
6898
6899
6900/**
6901 * Calculates the effective stack address for a push of the specified size as
6902 * well as the new temporary RSP value (upper bits may be masked).
6903 *
6904 * @returns Effective stack address for the push.
6905 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6906 * @param pTmpRsp The temporary stack pointer. This is updated.
6907 * @param cbItem The size of the stack item to push.
6908 */
6909DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6910{
6911 RTGCPTR GCPtrTop;
6912
6913 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6914 GCPtrTop = pTmpRsp->u -= cbItem;
6915 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6916 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6917 else
6918 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6919 return GCPtrTop;
6920}
6921
6922
6923/**
6924 * Gets the effective stack address for a pop of the specified size and
6925 * calculates and updates the temporary RSP.
6926 *
6927 * @returns Current stack pointer.
6928 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6929 * @param pTmpRsp The temporary stack pointer. This is updated.
6930 * @param cbItem The size of the stack item to pop.
6931 */
6932DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6933{
6934 RTGCPTR GCPtrTop;
6935 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6936 {
6937 GCPtrTop = pTmpRsp->u;
6938 pTmpRsp->u += cbItem;
6939 }
6940 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6941 {
6942 GCPtrTop = pTmpRsp->DWords.dw0;
6943 pTmpRsp->DWords.dw0 += cbItem;
6944 }
6945 else
6946 {
6947 GCPtrTop = pTmpRsp->Words.w0;
6948 pTmpRsp->Words.w0 += cbItem;
6949 }
6950 return GCPtrTop;
6951}
6952
6953/** @} */
6954
6955
6956/** @name FPU access and helpers.
6957 *
6958 * @{
6959 */
6960
6961
6962/**
6963 * Hook for preparing to use the host FPU.
6964 *
6965 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6966 *
6967 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6968 */
6969DECLINLINE(void) iemFpuPrepareUsage(PVMCPU pVCpu)
6970{
6971#ifdef IN_RING3
6972 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6973#else
6974 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6975#endif
6976 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6977}
6978
6979
6980/**
6981 * Hook for preparing to use the host FPU for SSE.
6982 *
6983 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6984 *
6985 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6986 */
6987DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPU pVCpu)
6988{
6989 iemFpuPrepareUsage(pVCpu);
6990}
6991
6992
6993/**
6994 * Hook for preparing to use the host FPU for AVX.
6995 *
6996 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6997 *
6998 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6999 */
7000DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPU pVCpu)
7001{
7002 iemFpuPrepareUsage(pVCpu);
7003}
7004
7005
7006/**
7007 * Hook for actualizing the guest FPU state before the interpreter reads it.
7008 *
7009 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7010 *
7011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7012 */
7013DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPU pVCpu)
7014{
7015#ifdef IN_RING3
7016 NOREF(pVCpu);
7017#else
7018 CPUMRZFpuStateActualizeForRead(pVCpu);
7019#endif
7020 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7021}
7022
7023
7024/**
7025 * Hook for actualizing the guest FPU state before the interpreter changes it.
7026 *
7027 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7028 *
7029 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7030 */
7031DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPU pVCpu)
7032{
7033#ifdef IN_RING3
7034 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7035#else
7036 CPUMRZFpuStateActualizeForChange(pVCpu);
7037#endif
7038 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7039}
7040
7041
7042/**
7043 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
7044 * only.
7045 *
7046 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7047 *
7048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7049 */
7050DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPU pVCpu)
7051{
7052#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7053 NOREF(pVCpu);
7054#else
7055 CPUMRZFpuStateActualizeSseForRead(pVCpu);
7056#endif
7057 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7058}
7059
7060
7061/**
7062 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7063 * read+write.
7064 *
7065 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7066 *
7067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7068 */
7069DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPU pVCpu)
7070{
7071#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7072 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7073#else
7074 CPUMRZFpuStateActualizeForChange(pVCpu);
7075#endif
7076 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7077}
7078
7079
7080/**
7081 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7082 * only.
7083 *
7084 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7085 *
7086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7087 */
7088DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPU pVCpu)
7089{
7090#ifdef IN_RING3
7091 NOREF(pVCpu);
7092#else
7093 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7094#endif
7095 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7096}
7097
7098
7099/**
7100 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7101 * read+write.
7102 *
7103 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7104 *
7105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7106 */
7107DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPU pVCpu)
7108{
7109#ifdef IN_RING3
7110 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7111#else
7112 CPUMRZFpuStateActualizeForChange(pVCpu);
7113#endif
7114 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7115}
7116
7117
7118/**
7119 * Stores a QNaN value into a FPU register.
7120 *
7121 * @param pReg Pointer to the register.
7122 */
7123DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7124{
7125 pReg->au32[0] = UINT32_C(0x00000000);
7126 pReg->au32[1] = UINT32_C(0xc0000000);
7127 pReg->au16[4] = UINT16_C(0xffff);
7128}
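
/* Illustrative note (editor's sketch, not part of the original sources): the
 * three stores above build the x87 "real indefinite" QNaN, i.e. the 80-bit
 * pattern with sign+exponent word 0xffff and mantissa 0xc000000000000000
 * (sign=1, exponent=0x7fff, top two mantissa bits set), which is the value a
 * masked invalid-operation response loads.
 */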
7129
7130
7131/**
7132 * Updates the FOP, FPU.CS and FPUIP registers.
7133 *
7134 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7135 * @param pFpuCtx The FPU context.
7136 */
7137DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPU pVCpu, PX86FXSTATE pFpuCtx)
7138{
7139 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7140 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7141 /** @todo x87.CS and FPUIP need to be kept separately. */
7142 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7143 {
7144 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7145 * happens in real mode here based on the fnsave and fnstenv images. */
7146 pFpuCtx->CS = 0;
7147 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7148 }
7149 else
7150 {
7151 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7152 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7153 }
7154}
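
/* Illustrative note (editor's sketch, not part of the original sources): in
 * real and V86 mode the FPUIP field gets the linear address, e.g. CS=0x1234
 * and EIP=0x0010 give FPUIP = 0x0010 | (0x1234 << 4) = 0x12350 with CS stored
 * as zero - the assumption about FNSAVE/FNSTENV images noted in the @todo
 * above.
 */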
7155
7156
7157/**
7158 * Updates the x87.DS and FPUDP registers.
7159 *
7160 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7161 * @param pFpuCtx The FPU context.
7162 * @param iEffSeg The effective segment register.
7163 * @param GCPtrEff The effective address relative to @a iEffSeg.
7164 */
7165DECLINLINE(void) iemFpuUpdateDP(PVMCPU pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7166{
7167 RTSEL sel;
7168 switch (iEffSeg)
7169 {
7170 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7171 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7172 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7173 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7174 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7175 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7176 default:
7177 AssertMsgFailed(("%d\n", iEffSeg));
7178 sel = pVCpu->cpum.GstCtx.ds.Sel;
7179 }
7180 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7181 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7182 {
7183 pFpuCtx->DS = 0;
7184 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7185 }
7186 else
7187 {
7188 pFpuCtx->DS = sel;
7189 pFpuCtx->FPUDP = GCPtrEff;
7190 }
7191}
7192
7193
7194/**
7195 * Rotates the stack registers in the push direction.
7196 *
7197 * @param pFpuCtx The FPU context.
7198 * @remarks This is a complete waste of time, but fxsave stores the registers in
7199 * stack order.
7200 */
7201DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7202{
7203 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7204 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7205 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7206 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7207 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7208 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7209 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7210 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7211 pFpuCtx->aRegs[0].r80 = r80Tmp;
7212}
7213
7214
7215/**
7216 * Rotates the stack registers in the pop direction.
7217 *
7218 * @param pFpuCtx The FPU context.
7219 * @remarks This is a complete waste of time, but fxsave stores the registers in
7220 * stack order.
7221 */
7222DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7223{
7224 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7225 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7226 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7227 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7228 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7229 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7230 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7231 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7232 pFpuCtx->aRegs[7].r80 = r80Tmp;
7233}
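
/* Illustrative note (editor's sketch, not part of the original sources):
 * aRegs[] is kept in ST(i) order, i.e. aRegs[0] is always ST(0).  A push
 * therefore stores the new value in aRegs[7] (the slot that becomes ST(0)
 * once TOP is decremented) and then rotates: aRegs[0] <- old aRegs[7],
 * aRegs[1] <- old aRegs[0], and so on.  The pop rotation is the exact
 * inverse.
 */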
7234
7235
7236/**
7237 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7238 * exception prevents it.
7239 *
7240 * @param pResult The FPU operation result to push.
7241 * @param pFpuCtx The FPU context.
7242 */
7243IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7244{
7245 /* Update FSW and bail if there are pending exceptions afterwards. */
7246 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7247 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7248 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7249 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7250 {
7251 pFpuCtx->FSW = fFsw;
7252 return;
7253 }
7254
7255 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7256 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7257 {
7258 /* All is fine, push the actual value. */
7259 pFpuCtx->FTW |= RT_BIT(iNewTop);
7260 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7261 }
7262 else if (pFpuCtx->FCW & X86_FCW_IM)
7263 {
7264 /* Masked stack overflow, push QNaN. */
7265 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7266 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7267 }
7268 else
7269 {
7270 /* Raise stack overflow, don't push anything. */
7271 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7272 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7273 return;
7274 }
7275
7276 fFsw &= ~X86_FSW_TOP_MASK;
7277 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7278 pFpuCtx->FSW = fFsw;
7279
7280 iemFpuRotateStackPush(pFpuCtx);
7281}
7282
7283
7284/**
7285 * Stores a result in a FPU register and updates the FSW and FTW.
7286 *
7287 * @param pFpuCtx The FPU context.
7288 * @param pResult The result to store.
7289 * @param iStReg Which FPU register to store it in.
7290 */
7291IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7292{
7293 Assert(iStReg < 8);
7294 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7295 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7296 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7297 pFpuCtx->FTW |= RT_BIT(iReg);
7298 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7299}
7300
7301
7302/**
7303 * Only updates the FPU status word (FSW) with the result of the current
7304 * instruction.
7305 *
7306 * @param pFpuCtx The FPU context.
7307 * @param u16FSW The FSW output of the current instruction.
7308 */
7309IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7310{
7311 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7312 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7313}
7314
7315
7316/**
7317 * Pops one item off the FPU stack if no pending exception prevents it.
7318 *
7319 * @param pFpuCtx The FPU context.
7320 */
7321IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7322{
7323 /* Check pending exceptions. */
7324 uint16_t uFSW = pFpuCtx->FSW;
7325 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7326 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7327 return;
7328
7329 /* TOP--. */
7330 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7331 uFSW &= ~X86_FSW_TOP_MASK;
7332 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7333 pFpuCtx->FSW = uFSW;
7334
7335 /* Mark the previous ST0 as empty. */
7336 iOldTop >>= X86_FSW_TOP_SHIFT;
7337 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7338
7339 /* Rotate the registers. */
7340 iemFpuRotateStackPop(pFpuCtx);
7341}
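
/* Illustrative note (editor's sketch, not part of the original sources): TOP
 * is a 3-bit field, so adding 9 and masking is the same as adding 1 modulo 8,
 * e.g. TOP=7 -> (7 + 9) & 7 = 0.  The slot that was ST(0) is then marked
 * empty in FTW before the register array is rotated back into ST(i) order.
 */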
7342
7343
7344/**
7345 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7346 *
7347 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7348 * @param pResult The FPU operation result to push.
7349 */
7350IEM_STATIC void iemFpuPushResult(PVMCPU pVCpu, PIEMFPURESULT pResult)
7351{
7352 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7353 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7354 iemFpuMaybePushResult(pResult, pFpuCtx);
7355}
7356
7357
7358/**
7359 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7360 * and sets FPUDP and FPUDS.
7361 *
7362 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7363 * @param pResult The FPU operation result to push.
7364 * @param iEffSeg The effective segment register.
7365 * @param GCPtrEff The effective address relative to @a iEffSeg.
7366 */
7367IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7368{
7369 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7370 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7371 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7372 iemFpuMaybePushResult(pResult, pFpuCtx);
7373}
7374
7375
7376/**
7377 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7378 * unless a pending exception prevents it.
7379 *
7380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7381 * @param pResult The FPU operation result to store and push.
7382 */
7383IEM_STATIC void iemFpuPushResultTwo(PVMCPU pVCpu, PIEMFPURESULTTWO pResult)
7384{
7385 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7386 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7387
7388 /* Update FSW and bail if there are pending exceptions afterwards. */
7389 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7390 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7391 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7392 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7393 {
7394 pFpuCtx->FSW = fFsw;
7395 return;
7396 }
7397
7398 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7399 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7400 {
7401 /* All is fine, push the actual value. */
7402 pFpuCtx->FTW |= RT_BIT(iNewTop);
7403 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7404 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7405 }
7406 else if (pFpuCtx->FCW & X86_FCW_IM)
7407 {
7408 /* Masked stack overflow, push QNaN. */
7409 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7410 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7411 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7412 }
7413 else
7414 {
7415 /* Raise stack overflow, don't push anything. */
7416 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7417 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7418 return;
7419 }
7420
7421 fFsw &= ~X86_FSW_TOP_MASK;
7422 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7423 pFpuCtx->FSW = fFsw;
7424
7425 iemFpuRotateStackPush(pFpuCtx);
7426}
7427
7428
7429/**
7430 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7431 * FOP.
7432 *
7433 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7434 * @param pResult The result to store.
7435 * @param iStReg Which FPU register to store it in.
7436 */
7437IEM_STATIC void iemFpuStoreResult(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7438{
7439 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7440 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7441 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7442}
7443
7444
7445/**
7446 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7447 * FOP, and then pops the stack.
7448 *
7449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7450 * @param pResult The result to store.
7451 * @param iStReg Which FPU register to store it in.
7452 */
7453IEM_STATIC void iemFpuStoreResultThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7454{
7455 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7456 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7457 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7458 iemFpuMaybePopOne(pFpuCtx);
7459}
7460
7461
7462/**
7463 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7464 * FPUDP, and FPUDS.
7465 *
7466 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7467 * @param pResult The result to store.
7468 * @param iStReg Which FPU register to store it in.
7469 * @param iEffSeg The effective memory operand selector register.
7470 * @param GCPtrEff The effective memory operand offset.
7471 */
7472IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPU pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7473 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7474{
7475 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7476 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7477 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7478 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7479}
7480
7481
7482/**
7483 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7484 * FPUDP, and FPUDS, and then pops the stack.
7485 *
7486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7487 * @param pResult The result to store.
7488 * @param iStReg Which FPU register to store it in.
7489 * @param iEffSeg The effective memory operand selector register.
7490 * @param GCPtrEff The effective memory operand offset.
7491 */
7492IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPU pVCpu, PIEMFPURESULT pResult,
7493 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7494{
7495 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7496 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7497 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7498 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7499 iemFpuMaybePopOne(pFpuCtx);
7500}
7501
7502
7503/**
7504 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7505 *
7506 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7507 */
7508IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPU pVCpu)
7509{
7510 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7511 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7512}
7513
7514
7515/**
7516 * Marks the specified stack register as free (for FFREE).
7517 *
7518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7519 * @param iStReg The register to free.
7520 */
7521IEM_STATIC void iemFpuStackFree(PVMCPU pVCpu, uint8_t iStReg)
7522{
7523 Assert(iStReg < 8);
7524 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7525 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7526 pFpuCtx->FTW &= ~RT_BIT(iReg);
7527}
7528
7529
7530/**
7531 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7532 *
7533 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7534 */
7535IEM_STATIC void iemFpuStackIncTop(PVMCPU pVCpu)
7536{
7537 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7538 uint16_t uFsw = pFpuCtx->FSW;
7539 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7540 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7541 uFsw &= ~X86_FSW_TOP_MASK;
7542 uFsw |= uTop;
7543 pFpuCtx->FSW = uFsw;
7544}
7545
7546
7547/**
7548 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7549 *
7550 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7551 */
7552IEM_STATIC void iemFpuStackDecTop(PVMCPU pVCpu)
7553{
7554 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7555 uint16_t uFsw = pFpuCtx->FSW;
7556 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7557 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7558 uFsw &= ~X86_FSW_TOP_MASK;
7559 uFsw |= uTop;
7560 pFpuCtx->FSW = uFsw;
7561}
7562
7563
7564/**
7565 * Updates the FSW, FOP, FPUIP, and FPUCS.
7566 *
7567 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7568 * @param u16FSW The FSW from the current instruction.
7569 */
7570IEM_STATIC void iemFpuUpdateFSW(PVMCPU pVCpu, uint16_t u16FSW)
7571{
7572 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7573 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7574 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7575}
7576
7577
7578/**
7579 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7580 *
7581 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7582 * @param u16FSW The FSW from the current instruction.
7583 */
7584IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPU pVCpu, uint16_t u16FSW)
7585{
7586 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7587 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7588 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7589 iemFpuMaybePopOne(pFpuCtx);
7590}
7591
7592
7593/**
7594 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7595 *
7596 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7597 * @param u16FSW The FSW from the current instruction.
7598 * @param iEffSeg The effective memory operand selector register.
7599 * @param GCPtrEff The effective memory operand offset.
7600 */
7601IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7602{
7603 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7604 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7605 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7606 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7607}
7608
7609
7610/**
7611 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7612 *
7613 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7614 * @param u16FSW The FSW from the current instruction.
7615 */
7616IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPU pVCpu, uint16_t u16FSW)
7617{
7618 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7619 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7620 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7621 iemFpuMaybePopOne(pFpuCtx);
7622 iemFpuMaybePopOne(pFpuCtx);
7623}
7624
7625
7626/**
7627 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7628 *
7629 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7630 * @param u16FSW The FSW from the current instruction.
7631 * @param iEffSeg The effective memory operand selector register.
7632 * @param GCPtrEff The effective memory operand offset.
7633 */
7634IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPU pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7635{
7636 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7637 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7638 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7639 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7640 iemFpuMaybePopOne(pFpuCtx);
7641}
7642
7643
7644/**
7645 * Worker routine for raising an FPU stack underflow exception.
7646 *
7647 * @param pFpuCtx The FPU context.
7648 * @param iStReg The stack register being accessed.
7649 */
7650IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7651{
7652 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7653 if (pFpuCtx->FCW & X86_FCW_IM)
7654 {
7655 /* Masked underflow. */
7656 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7657 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7658 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7659 if (iStReg != UINT8_MAX)
7660 {
7661 pFpuCtx->FTW |= RT_BIT(iReg);
7662 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7663 }
7664 }
7665 else
7666 {
7667 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7668 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7669 }
7670}
7671
7672
7673/**
7674 * Raises a FPU stack underflow exception.
7675 *
7676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7677 * @param iStReg The destination register that should be loaded
7678 * with QNaN if \#IS is not masked. Specify
7679 * UINT8_MAX if none (like for fcom).
7680 */
7681DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPU pVCpu, uint8_t iStReg)
7682{
7683 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7684 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7685 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7686}
7687
7688
7689DECL_NO_INLINE(IEM_STATIC, void)
7690iemFpuStackUnderflowWithMemOp(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7691{
7692 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7693 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7694 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7695 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7696}
7697
7698
7699DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPU pVCpu, uint8_t iStReg)
7700{
7701 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7702 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7703 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7704 iemFpuMaybePopOne(pFpuCtx);
7705}
7706
7707
7708DECL_NO_INLINE(IEM_STATIC, void)
7709iemFpuStackUnderflowWithMemOpThenPop(PVMCPU pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7710{
7711 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7712 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7713 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7714 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7715 iemFpuMaybePopOne(pFpuCtx);
7716}
7717
7718
7719DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPU pVCpu)
7720{
7721 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7722 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7723 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7724 iemFpuMaybePopOne(pFpuCtx);
7725 iemFpuMaybePopOne(pFpuCtx);
7726}
7727
7728
7729DECL_NO_INLINE(IEM_STATIC, void)
7730iemFpuStackPushUnderflow(PVMCPU pVCpu)
7731{
7732 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7733 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7734
7735 if (pFpuCtx->FCW & X86_FCW_IM)
7736 {
7737 /* Masked underflow - Push QNaN. */
7738 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7739 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7740 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7741 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7742 pFpuCtx->FTW |= RT_BIT(iNewTop);
7743 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7744 iemFpuRotateStackPush(pFpuCtx);
7745 }
7746 else
7747 {
7748 /* Exception pending - don't change TOP or the register stack. */
7749 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7750 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7751 }
7752}
7753
7754
7755DECL_NO_INLINE(IEM_STATIC, void)
7756iemFpuStackPushUnderflowTwo(PVMCPU pVCpu)
7757{
7758 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7759 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7760
7761 if (pFpuCtx->FCW & X86_FCW_IM)
7762 {
7763 /* Masked underflow - Push QNaN. */
7764 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7765 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7766 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7767 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7768 pFpuCtx->FTW |= RT_BIT(iNewTop);
7769 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7770 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7771 iemFpuRotateStackPush(pFpuCtx);
7772 }
7773 else
7774 {
7775 /* Exception pending - don't change TOP or the register stack. */
7776 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7777 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7778 }
7779}
7780
7781
7782/**
7783 * Worker routine for raising an FPU stack overflow exception on a push.
7784 *
7785 * @param pFpuCtx The FPU context.
7786 */
7787IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7788{
7789 if (pFpuCtx->FCW & X86_FCW_IM)
7790 {
7791 /* Masked overflow. */
7792 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7793 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7794 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7795 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7796 pFpuCtx->FTW |= RT_BIT(iNewTop);
7797 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7798 iemFpuRotateStackPush(pFpuCtx);
7799 }
7800 else
7801 {
7802 /* Exception pending - don't change TOP or the register stack. */
7803 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7804 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7805 }
7806}
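
/*
 * Illustrative sketch, not part of the IEM code proper: how the TOP-relative
 * indexing used by the workers above maps ST(i) onto the eight physical x87
 * registers, and why a push can decrement TOP modulo 8 with (TOP + 7) & 7.
 * The helper names are hypothetical and only exist for this example.
 */
DECLINLINE(uint16_t) iemFpuExamplePhysRegForSt(uint16_t uTop, uint8_t iStReg)
{
    /* ST(i) lives at physical register (TOP + i) mod 8. */
    return (uTop + iStReg) & 7;
}

DECLINLINE(uint16_t) iemFpuExampleTopAfterPush(uint16_t uTop)
{
    /* Decrementing modulo 8 is the same as adding 7 modulo 8. */
    return (uTop + 7) & 7;
}

DECLINLINE(void) iemFpuExampleTopArithmetic(void)
{
    Assert(iemFpuExamplePhysRegForSt(5 /*TOP*/, 2 /*ST(2)*/) == 7);
    Assert(iemFpuExampleTopAfterPush(0) == 7); /* a push wraps TOP from 0 to 7 */
    Assert(iemFpuExampleTopAfterPush(5) == 4);
}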
7807
7808
7809/**
7810 * Raises an FPU stack overflow exception on a push.
7811 *
7812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7813 */
7814DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPU pVCpu)
7815{
7816 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7817 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7818 iemFpuStackPushOverflowOnly(pFpuCtx);
7819}
7820
7821
7822/**
7823 * Raises an FPU stack overflow exception on a push with a memory operand.
7824 *
7825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7826 * @param iEffSeg The effective memory operand selector register.
7827 * @param GCPtrEff The effective memory operand offset.
7828 */
7829DECL_NO_INLINE(IEM_STATIC, void)
7830iemFpuStackPushOverflowWithMemOp(PVMCPU pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7831{
7832 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7833 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7834 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7835 iemFpuStackPushOverflowOnly(pFpuCtx);
7836}
7837
7838
7839IEM_STATIC int iemFpuStRegNotEmpty(PVMCPU pVCpu, uint8_t iStReg)
7840{
7841 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7842 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7843 if (pFpuCtx->FTW & RT_BIT(iReg))
7844 return VINF_SUCCESS;
7845 return VERR_NOT_FOUND;
7846}
7847
7848
7849IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7850{
7851 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7852 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7853 if (pFpuCtx->FTW & RT_BIT(iReg))
7854 {
7855 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7856 return VINF_SUCCESS;
7857 }
7858 return VERR_NOT_FOUND;
7859}
7860
7861
7862IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7863 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7864{
7865 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7866 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7867 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7868 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7869 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7870 {
7871 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7872 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7873 return VINF_SUCCESS;
7874 }
7875 return VERR_NOT_FOUND;
7876}
7877
7878
7879IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPU pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7880{
7881 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7882 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7883 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7884 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7885 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7886 {
7887 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7888 return VINF_SUCCESS;
7889 }
7890 return VERR_NOT_FOUND;
7891}
7892
7893
7894/**
7895 * Updates the FPU exception status after FCW is changed.
7896 *
7897 * @param pFpuCtx The FPU context.
7898 */
7899IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7900{
7901 uint16_t u16Fsw = pFpuCtx->FSW;
7902 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7903 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7904 else
7905 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7906 pFpuCtx->FSW = u16Fsw;
7907}
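
/*
 * Worked example for the ES/B recalculation above, stated as a sketch with
 * plain constants rather than the X86_FSW_xxx / X86_FCW_xxx defines; the
 * helper name is hypothetical. Bits 0..5 of FSW/FCW are the six exception
 * and mask bits, ES is FSW bit 7 and B is FSW bit 15.
 */
DECLINLINE(uint16_t) iemFpuExampleRecalcEsB(uint16_t u16Fcw, uint16_t u16Fsw)
{
    uint16_t const fXcpt = 0x003f;  /* IE, DE, ZE, OE, UE, PE */
    uint16_t const fEsB  = 0x8080;  /* B (bit 15) + ES (bit 7) */
    if ((u16Fsw & fXcpt) & ~(u16Fcw & fXcpt))
        u16Fsw |= fEsB;     /* at least one pending exception is unmasked */
    else
        u16Fsw &= (uint16_t)~fEsB; /* every pending exception is masked */
    return u16Fsw;
}
/* E.g. a pending IE (FSW=0x0001) with all exceptions masked (FCW=0x003f)
   leaves ES/B clear, while unmasking IE (FCW=0x003e) sets them. */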
7908
7909
7910/**
7911 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7912 *
7913 * @returns The full FTW.
7914 * @param pFpuCtx The FPU context.
7915 */
7916IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7917{
7918 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7919 uint16_t u16Ftw = 0;
7920 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7921 for (unsigned iSt = 0; iSt < 8; iSt++)
7922 {
7923 unsigned const iReg = (iSt + iTop) & 7;
7924 if (!(u8Ftw & RT_BIT(iReg)))
7925 u16Ftw |= 3 << (iReg * 2); /* empty */
7926 else
7927 {
7928 uint16_t uTag;
7929 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7930 if (pr80Reg->s.uExponent == 0x7fff)
7931 uTag = 2; /* Exponent is all 1's => Special. */
7932 else if (pr80Reg->s.uExponent == 0x0000)
7933 {
7934 if (pr80Reg->s.u64Mantissa == 0x0000)
7935 uTag = 1; /* All bits are zero => Zero. */
7936 else
7937 uTag = 2; /* Must be special. */
7938 }
7939 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7940 uTag = 0; /* Valid. */
7941 else
7942 uTag = 2; /* Must be special. */
7943
7944 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7945 }
7946 }
7947
7948 return u16Ftw;
7949}
7950
7951
7952/**
7953 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7954 *
7955 * @returns The compressed FTW.
7956 * @param u16FullFtw The full FTW to convert.
7957 */
7958IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7959{
7960 uint8_t u8Ftw = 0;
7961 for (unsigned i = 0; i < 8; i++)
7962 {
7963 if ((u16FullFtw & 3) != 3 /*empty*/)
7964 u8Ftw |= RT_BIT(i);
7965 u16FullFtw >>= 2;
7966 }
7967
7968 return u8Ftw;
7969}
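
/*
 * Illustrative sketch of the tag word encodings handled by iemFpuCalcFullFtw
 * and iemFpuCompressFtw above: the full FTW keeps two bits per register
 * (0=valid, 1=zero, 2=special, 3=empty), while the compressed form keeps a
 * single not-empty bit per register. The helper and the example value are
 * hypothetical.
 */
DECLINLINE(void) iemFpuExampleFtwRoundTrip(void)
{
    /* Physical registers 0 (valid) and 1 (zero) in use, the rest empty:
       full FTW = 11 11 11 11 11 11 01 00 (binary) = 0xfff4. */
    uint16_t const u16FullFtw = 0xfff4;
    uint8_t        u8Ftw      = 0;
    for (unsigned i = 0; i < 8; i++)
        if (((u16FullFtw >> (i * 2)) & 3) != 3 /*empty*/)
            u8Ftw |= RT_BIT(i);
    Assert(u8Ftw == 0x03); /* only physical registers 0 and 1 are marked in use */
    Assert(iemFpuCompressFtw(u16FullFtw) == 0x03);
}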
7970
7971/** @} */
7972
7973
7974/** @name Memory access.
7975 *
7976 * @{
7977 */
7978
7979
7980/**
7981 * Updates the IEMCPU::cbWritten counter if applicable.
7982 *
7983 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7984 * @param fAccess The access being accounted for.
7985 * @param cbMem The access size.
7986 */
7987DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPU pVCpu, uint32_t fAccess, size_t cbMem)
7988{
7989 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7990 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7991 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7992}
7993
7994
7995/**
7996 * Checks if the given segment can be written to, raising the appropriate
7997 * exception if not.
7998 *
7999 * @returns VBox strict status code.
8000 *
8001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8002 * @param pHid Pointer to the hidden register.
8003 * @param iSegReg The register number.
8004 * @param pu64BaseAddr Where to return the base address to use for the
8005 * segment. (In 64-bit code it may differ from the
8006 * base in the hidden segment.)
8007 */
8008IEM_STATIC VBOXSTRICTRC
8009iemMemSegCheckWriteAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8010{
8011 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8012
8013 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8014 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8015 else
8016 {
8017 if (!pHid->Attr.n.u1Present)
8018 {
8019 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8020 AssertRelease(uSel == 0);
8021 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8022 return iemRaiseGeneralProtectionFault0(pVCpu);
8023 }
8024
8025 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
8026 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8027 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
8028 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
8029 *pu64BaseAddr = pHid->u64Base;
8030 }
8031 return VINF_SUCCESS;
8032}
8033
8034
8035/**
8036 * Checks if the given segment can be read from, raising the appropriate
8037 * exception if not.
8038 *
8039 * @returns VBox strict status code.
8040 *
8041 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8042 * @param pHid Pointer to the hidden register.
8043 * @param iSegReg The register number.
8044 * @param pu64BaseAddr Where to return the base address to use for the
8045 * segment. (In 64-bit code it may differ from the
8046 * base in the hidden segment.)
8047 */
8048IEM_STATIC VBOXSTRICTRC
8049iemMemSegCheckReadAccessEx(PVMCPU pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8050{
8051 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8052
8053 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8054 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8055 else
8056 {
8057 if (!pHid->Attr.n.u1Present)
8058 {
8059 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8060 AssertRelease(uSel == 0);
8061 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8062 return iemRaiseGeneralProtectionFault0(pVCpu);
8063 }
8064
8065 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8066 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8067 *pu64BaseAddr = pHid->u64Base;
8068 }
8069 return VINF_SUCCESS;
8070}
8071
8072
8073/**
8074 * Applies the segment limit, base and attributes.
8075 *
8076 * This may raise a \#GP or \#SS.
8077 *
8078 * @returns VBox strict status code.
8079 *
8080 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8081 * @param fAccess The kind of access which is being performed.
8082 * @param iSegReg The index of the segment register to apply.
8083 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8084 * TSS, ++).
8085 * @param cbMem The access size.
8086 * @param pGCPtrMem Pointer to the guest memory address to apply
8087 * segmentation to. Input and output parameter.
8088 */
8089IEM_STATIC VBOXSTRICTRC
8090iemMemApplySegment(PVMCPU pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8091{
8092 if (iSegReg == UINT8_MAX)
8093 return VINF_SUCCESS;
8094
8095 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8096 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8097 switch (pVCpu->iem.s.enmCpuMode)
8098 {
8099 case IEMMODE_16BIT:
8100 case IEMMODE_32BIT:
8101 {
8102 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8103 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8104
8105 if ( pSel->Attr.n.u1Present
8106 && !pSel->Attr.n.u1Unusable)
8107 {
8108 Assert(pSel->Attr.n.u1DescType);
8109 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8110 {
8111 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8112 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8113 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8114
8115 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8116 {
8117 /** @todo CPL check. */
8118 }
8119
8120 /*
8121 * There are two kinds of data selectors, normal and expand down.
8122 */
8123 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8124 {
8125 if ( GCPtrFirst32 > pSel->u32Limit
8126 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8127 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8128 }
8129 else
8130 {
8131 /*
8132 * The upper boundary is defined by the B bit, not the G bit!
8133 */
8134 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8135 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8136 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8137 }
8138 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8139 }
8140 else
8141 {
8142
8143 /*
8144 * A code selector can usually be used to read through; writing is
8145 * only permitted in real and V8086 mode.
8146 */
8147 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8148 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8149 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8150 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8151 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8152
8153 if ( GCPtrFirst32 > pSel->u32Limit
8154 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8155 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8156
8157 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8158 {
8159 /** @todo CPL check. */
8160 }
8161
8162 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8163 }
8164 }
8165 else
8166 return iemRaiseGeneralProtectionFault0(pVCpu);
8167 return VINF_SUCCESS;
8168 }
8169
8170 case IEMMODE_64BIT:
8171 {
8172 RTGCPTR GCPtrMem = *pGCPtrMem;
8173 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8174 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8175
8176 Assert(cbMem >= 1);
8177 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8178 return VINF_SUCCESS;
8179 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8180 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8181 return iemRaiseGeneralProtectionFault0(pVCpu);
8182 }
8183
8184 default:
8185 AssertFailedReturn(VERR_IEM_IPE_7);
8186 }
8187}
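
/*
 * Illustrative sketch of the 16/32-bit limit checking performed above, reduced
 * to a standalone predicate: an expand-up data segment allows offsets
 * [0, limit], while an expand-down segment allows [limit + 1, 0xffff or
 * 0xffffffff depending on the D/B bit]. The helper name is hypothetical.
 */
DECLINLINE(bool) iemMemExampleWithinSegLimit(uint32_t offFirst, uint32_t cbAccess,
                                             uint32_t uLimit, bool fExpandDown, bool fDefBig)
{
    uint32_t const offLast = offFirst + cbAccess - 1;
    if (!fExpandDown)
        return offFirst <= uLimit && offLast <= uLimit;
    /* Expand-down: the upper bound comes from the D/B bit, not from granularity. */
    uint32_t const offMax = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return offFirst >= uLimit + UINT32_C(1) && offLast <= offMax;
}
/* E.g. a 4 byte access at offset 0x1000 with limit 0x0fff is rejected for an
   expand-up segment but accepted for a 16-bit expand-down one. */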
8188
8189
8190/**
8191 * Translates a virtual address to a physical address and checks if we
8192 * can access the page as specified.
8193 *
8194 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8195 * @param GCPtrMem The virtual address.
8196 * @param fAccess The intended access.
8197 * @param pGCPhysMem Where to return the physical address.
8198 */
8199IEM_STATIC VBOXSTRICTRC
8200iemMemPageTranslateAndCheckAccess(PVMCPU pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8201{
8202 /** @todo Need a different PGM interface here. We're currently using
8203 * generic / REM interfaces. This won't cut it for R0 & RC. */
8204 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8205 * iemSvmHandleWorldSwitch to work around raising a page-fault here. */
8206 RTGCPHYS GCPhys;
8207 uint64_t fFlags;
8208 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8209 if (RT_FAILURE(rc))
8210 {
8211 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8212 /** @todo Check unassigned memory in unpaged mode. */
8213 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8214 *pGCPhysMem = NIL_RTGCPHYS;
8215 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8216 }
8217
8218 /* If the page is writable and does not have the no-exec bit set, all
8219 access is allowed. Otherwise we'll have to check more carefully... */
8220 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8221 {
8222 /* Write to read only memory? */
8223 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8224 && !(fFlags & X86_PTE_RW)
8225 && ( (pVCpu->iem.s.uCpl == 3
8226 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8227 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8228 {
8229 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8230 *pGCPhysMem = NIL_RTGCPHYS;
8231 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8232 }
8233
8234 /* Kernel memory accessed by userland? */
8235 if ( !(fFlags & X86_PTE_US)
8236 && pVCpu->iem.s.uCpl == 3
8237 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8238 {
8239 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8240 *pGCPhysMem = NIL_RTGCPHYS;
8241 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8242 }
8243
8244 /* Executing non-executable memory? */
8245 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8246 && (fFlags & X86_PTE_PAE_NX)
8247 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8248 {
8249 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8250 *pGCPhysMem = NIL_RTGCPHYS;
8251 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8252 VERR_ACCESS_DENIED);
8253 }
8254 }
8255
8256 /*
8257 * Set the dirty / access flags.
8258 * ASSUMES this is set when the address is translated rather than on commit...
8259 */
8260 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8261 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8262 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8263 {
8264 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8265 AssertRC(rc2);
8266 }
8267
8268 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8269 *pGCPhysMem = GCPhys;
8270 return VINF_SUCCESS;
8271}
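
/*
 * Illustrative sketch of the write-protection rule applied above when a write
 * hits a page whose PTE lacks X86_PTE_RW: CPL 3 accesses that are not flagged
 * as system accesses always fault, while other writes only fault when CR0.WP
 * is set. The helper name is hypothetical.
 */
DECLINLINE(bool) iemMemExampleWriteToRoPageFaults(uint8_t uCpl, bool fCr0Wp, bool fSysAccess)
{
    if (uCpl == 3 && !fSysAccess)
        return true;    /* user-mode writes to a read-only page always #PF */
    return fCr0Wp;      /* supervisor (or system) writes #PF only with CR0.WP=1 */
}
/* E.g. uCpl=0 with CR0.WP=0 silently writes through (classic 386 behaviour),
   uCpl=0 with CR0.WP=1 faults, and a plain uCpl=3 data write faults regardless
   of CR0.WP. */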
8272
8273
8274
8275/**
8276 * Maps a physical page.
8277 *
8278 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8280 * @param GCPhysMem The physical address.
8281 * @param fAccess The intended access.
8282 * @param ppvMem Where to return the mapping address.
8283 * @param pLock The PGM lock.
8284 */
8285IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8286{
8287#ifdef IEM_LOG_MEMORY_WRITES
8288 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8289 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8290#endif
8291
8292 /** @todo This API may require some improvement later. A private deal with PGM
8293 * regarding locking and unlocking needs to be struck. A couple of TLBs
8294 * living in PGM, but with publicly accessible inlined access methods
8295 * could perhaps be an even better solution. */
8296 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8297 GCPhysMem,
8298 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8299 pVCpu->iem.s.fBypassHandlers,
8300 ppvMem,
8301 pLock);
8302 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8303 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8304
8305 return rc;
8306}
8307
8308
8309/**
8310 * Unmaps a page previously mapped by iemMemPageMap.
8311 *
8312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8313 * @param GCPhysMem The physical address.
8314 * @param fAccess The intended access.
8315 * @param pvMem What iemMemPageMap returned.
8316 * @param pLock The PGM lock.
8317 */
8318DECLINLINE(void) iemMemPageUnmap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8319{
8320 NOREF(pVCpu);
8321 NOREF(GCPhysMem);
8322 NOREF(fAccess);
8323 NOREF(pvMem);
8324 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8325}
8326
8327
8328/**
8329 * Looks up a memory mapping entry.
8330 *
8331 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8333 * @param pvMem The memory address.
8334 * @param fAccess The kind of access to match.
8335 */
8336DECLINLINE(int) iemMapLookup(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8337{
8338 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8339 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8340 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8341 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8342 return 0;
8343 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8344 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8345 return 1;
8346 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8347 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8348 return 2;
8349 return VERR_NOT_FOUND;
8350}
8351
8352
8353/**
8354 * Finds a free memmap entry when using iNextMapping doesn't work.
8355 *
8356 * @returns Memory mapping index, 1024 on failure.
8357 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8358 */
8359IEM_STATIC unsigned iemMemMapFindFree(PVMCPU pVCpu)
8360{
8361 /*
8362 * The easy case.
8363 */
8364 if (pVCpu->iem.s.cActiveMappings == 0)
8365 {
8366 pVCpu->iem.s.iNextMapping = 1;
8367 return 0;
8368 }
8369
8370 /* There should be enough mappings for all instructions. */
8371 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8372
8373 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8374 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8375 return i;
8376
8377 AssertFailedReturn(1024);
8378}
8379
8380
8381/**
8382 * Commits a bounce buffer that needs writing back and unmaps it.
8383 *
8384 * @returns Strict VBox status code.
8385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8386 * @param iMemMap The index of the buffer to commit.
8387 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8388 * Always false in ring-3, obviously.
8389 */
8390IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPU pVCpu, unsigned iMemMap, bool fPostponeFail)
8391{
8392 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8393 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8394#ifdef IN_RING3
8395 Assert(!fPostponeFail);
8396 RT_NOREF_PV(fPostponeFail);
8397#endif
8398
8399 /*
8400 * Do the writing.
8401 */
8402 PVM pVM = pVCpu->CTX_SUFF(pVM);
8403 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8404 {
8405 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8406 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8407 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8408 if (!pVCpu->iem.s.fBypassHandlers)
8409 {
8410 /*
8411 * Carefully and efficiently dealing with access handler return
8412 * codes makes this a little bloated.
8413 */
8414 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8415 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8416 pbBuf,
8417 cbFirst,
8418 PGMACCESSORIGIN_IEM);
8419 if (rcStrict == VINF_SUCCESS)
8420 {
8421 if (cbSecond)
8422 {
8423 rcStrict = PGMPhysWrite(pVM,
8424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8425 pbBuf + cbFirst,
8426 cbSecond,
8427 PGMACCESSORIGIN_IEM);
8428 if (rcStrict == VINF_SUCCESS)
8429 { /* nothing */ }
8430 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8431 {
8432 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8433 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8435 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8436 }
8437#ifndef IN_RING3
8438 else if (fPostponeFail)
8439 {
8440 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8441 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8442 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8443 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8444 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8445 return iemSetPassUpStatus(pVCpu, rcStrict);
8446 }
8447#endif
8448 else
8449 {
8450 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8451 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8452 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8453 return rcStrict;
8454 }
8455 }
8456 }
8457 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8458 {
8459 if (!cbSecond)
8460 {
8461 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8462 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8463 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8464 }
8465 else
8466 {
8467 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8468 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8469 pbBuf + cbFirst,
8470 cbSecond,
8471 PGMACCESSORIGIN_IEM);
8472 if (rcStrict2 == VINF_SUCCESS)
8473 {
8474 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8475 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8476 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8477 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8478 }
8479 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8480 {
8481 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8482 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8483 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8484 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8485 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8486 }
8487#ifndef IN_RING3
8488 else if (fPostponeFail)
8489 {
8490 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8491 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8492 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8493 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8494 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8495 return iemSetPassUpStatus(pVCpu, rcStrict);
8496 }
8497#endif
8498 else
8499 {
8500 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8501 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8502 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8503 return rcStrict2;
8504 }
8505 }
8506 }
8507#ifndef IN_RING3
8508 else if (fPostponeFail)
8509 {
8510 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8511 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8512 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8513 if (!cbSecond)
8514 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8515 else
8516 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8517 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8518 return iemSetPassUpStatus(pVCpu, rcStrict);
8519 }
8520#endif
8521 else
8522 {
8523 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8524 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8525 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8526 return rcStrict;
8527 }
8528 }
8529 else
8530 {
8531 /*
8532 * No access handlers, much simpler.
8533 */
8534 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8535 if (RT_SUCCESS(rc))
8536 {
8537 if (cbSecond)
8538 {
8539 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8540 if (RT_SUCCESS(rc))
8541 { /* likely */ }
8542 else
8543 {
8544 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8545 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8546 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8547 return rc;
8548 }
8549 }
8550 }
8551 else
8552 {
8553 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8554 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8555 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8556 return rc;
8557 }
8558 }
8559 }
8560
8561#if defined(IEM_LOG_MEMORY_WRITES)
8562 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8563 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8564 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8565 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8566 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8567 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8568
8569 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8570 g_cbIemWrote = cbWrote;
8571 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8572#endif
8573
8574 /*
8575 * Free the mapping entry.
8576 */
8577 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8578 Assert(pVCpu->iem.s.cActiveMappings != 0);
8579 pVCpu->iem.s.cActiveMappings--;
8580 return VINF_SUCCESS;
8581}
8582
8583
8584/**
8585 * iemMemMap worker that deals with a request crossing pages.
8586 */
8587IEM_STATIC VBOXSTRICTRC
8588iemMemBounceBufferMapCrossPage(PVMCPU pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8589{
8590 /*
8591 * Do the address translations.
8592 */
8593 RTGCPHYS GCPhysFirst;
8594 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8595 if (rcStrict != VINF_SUCCESS)
8596 return rcStrict;
8597
8598 RTGCPHYS GCPhysSecond;
8599 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8600 fAccess, &GCPhysSecond);
8601 if (rcStrict != VINF_SUCCESS)
8602 return rcStrict;
8603 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8604
8605#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8606 /*
8607 * Check if we need to cause an APIC-access VM-exit.
8608 *
8609 * The reason we do have to check whether the access is to be virtualized here is that
8610 * we already know we're crossing a page-boundary. Any cross-page access (which is at
8611 * most 4 bytes) involves accessing offsets prior to XAPIC_OFF_ID or extends well beyond
8612 * XAPIC_OFF_END + 4 bytes of the APIC-access page and hence must cause a VM-exit.
8613 */
8614 if ( CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu))
8615 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
8616 {
8617 RTGCPHYS const GCPhysMemFirst = GCPhysFirst & ~(RTGCPHYS)PAGE_OFFSET_MASK;
8618 RTGCPHYS const GCPhysMemSecond = GCPhysSecond & ~(RTGCPHYS)PAGE_OFFSET_MASK;
8619 RTGCPHYS const GCPhysApicAccessBase = CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu))
8620 & ~(RTGCPHYS)PAGE_OFFSET_MASK;
8621 if ( GCPhysMemFirst == GCPhysApicAccessBase
8622 || GCPhysMemSecond == GCPhysApicAccessBase)
8623 {
8624 uint16_t const offAccess = GCPhysFirst & (RTGCPHYS)PAGE_OFFSET_MASK;
8625 IEM_VMX_VMEXIT_APIC_ACCESS_RET(pVCpu, offAccess, fAccess);
8626 }
8627 }
8628#endif
8629
8630 PVM pVM = pVCpu->CTX_SUFF(pVM);
8631
8632 /*
8633 * Read in the current memory content if it's a read, execute or partial
8634 * write access.
8635 */
8636 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8637 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8638 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8639
8640 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8641 {
8642 if (!pVCpu->iem.s.fBypassHandlers)
8643 {
8644 /*
8645 * Must carefully deal with access handler status codes here;
8646 * it makes the code a bit bloated.
8647 */
8648 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8649 if (rcStrict == VINF_SUCCESS)
8650 {
8651 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8652 if (rcStrict == VINF_SUCCESS)
8653 { /*likely */ }
8654 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8655 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8656 else
8657 {
8658 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8659 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8660 return rcStrict;
8661 }
8662 }
8663 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8664 {
8665 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8666 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8667 {
8668 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8669 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8670 }
8671 else
8672 {
8673 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8674 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict2) ));
8675 return rcStrict2;
8676 }
8677 }
8678 else
8679 {
8680 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8681 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8682 return rcStrict;
8683 }
8684 }
8685 else
8686 {
8687 /*
8688 * No informational status codes here, much more straightforward.
8689 */
8690 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8691 if (RT_SUCCESS(rc))
8692 {
8693 Assert(rc == VINF_SUCCESS);
8694 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8695 if (RT_SUCCESS(rc))
8696 Assert(rc == VINF_SUCCESS);
8697 else
8698 {
8699 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8700 return rc;
8701 }
8702 }
8703 else
8704 {
8705 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8706 return rc;
8707 }
8708 }
8709 }
8710#ifdef VBOX_STRICT
8711 else
8712 memset(pbBuf, 0xcc, cbMem);
8713 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8714 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8715#endif
8716
8717 /*
8718 * Commit the bounce buffer entry.
8719 */
8720 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8721 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8722 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8723 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8724 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8725 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8726 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8727 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8728 pVCpu->iem.s.cActiveMappings++;
8729
8730 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8731 *ppvMem = pbBuf;
8732 return VINF_SUCCESS;
8733}
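
/*
 * Illustrative sketch of the first/second chunk split computed above for an
 * access that straddles a page boundary, using the same PAGE_SIZE and
 * PAGE_OFFSET_MASK definitions; the helper name is hypothetical.
 */
DECLINLINE(void) iemMemExampleSplitCrossPage(RTGCPTR GCPtrFirst, size_t cbMem,
                                             uint32_t *pcbFirstPage, uint32_t *pcbSecondPage)
{
    /* Bytes left on the first page; whatever remains spills onto the next page. */
    *pcbFirstPage  = PAGE_SIZE - (uint32_t)(GCPtrFirst & PAGE_OFFSET_MASK);
    *pcbSecondPage = (uint32_t)(cbMem - *pcbFirstPage);
}
/* E.g. an 8 byte access at 0x1ffa yields cbFirstPage=6 and cbSecondPage=2. */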
8734
8735
8736/**
8737 * iemMemMap worker that deals with iemMemPageMap failures.
8738 */
8739IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPU pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8740 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8741{
8742 /*
8743 * Filter out conditions we can handle and the ones which shouldn't happen.
8744 */
8745 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8746 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8747 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8748 {
8749 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8750 return rcMap;
8751 }
8752 pVCpu->iem.s.cPotentialExits++;
8753
8754 /*
8755 * Read in the current memory content if it's a read, execute or partial
8756 * write access.
8757 */
8758 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8759 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8760 {
8761 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8762 memset(pbBuf, 0xff, cbMem);
8763 else
8764 {
8765 int rc;
8766 if (!pVCpu->iem.s.fBypassHandlers)
8767 {
8768 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8769 if (rcStrict == VINF_SUCCESS)
8770 { /* nothing */ }
8771 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8772 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8773 else
8774 {
8775 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8776 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8777 return rcStrict;
8778 }
8779 }
8780 else
8781 {
8782 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8783 if (RT_SUCCESS(rc))
8784 { /* likely */ }
8785 else
8786 {
8787 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8788 GCPhysFirst, rc));
8789 return rc;
8790 }
8791 }
8792 }
8793 }
8794#ifdef VBOX_STRICT
8795 else
8796 memset(pbBuf, 0xcc, cbMem);
8799 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8800 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8801#endif
8802
8803 /*
8804 * Commit the bounce buffer entry.
8805 */
8806 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8807 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8808 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8809 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8810 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8811 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8812 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8813 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8814 pVCpu->iem.s.cActiveMappings++;
8815
8816 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8817 *ppvMem = pbBuf;
8818 return VINF_SUCCESS;
8819}
8820
8821
8822
8823/**
8824 * Maps the specified guest memory for the given kind of access.
8825 *
8826 * This may be using bounce buffering of the memory if it's crossing a page
8827 * boundary or if there is an access handler installed for any of it. Because
8828 * of lock prefix guarantees, we're in for some extra clutter when this
8829 * happens.
8830 *
8831 * This may raise a \#GP, \#SS, \#PF or \#AC.
8832 *
8833 * @returns VBox strict status code.
8834 *
8835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8836 * @param ppvMem Where to return the pointer to the mapped
8837 * memory.
8838 * @param cbMem The number of bytes to map. This is usually 1,
8839 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8840 * string operations it can be up to a page.
8841 * @param iSegReg The index of the segment register to use for
8842 * this access. The base and limits are checked.
8843 * Use UINT8_MAX to indicate that no segmentation
8844 * is required (for IDT, GDT and LDT accesses).
8845 * @param GCPtrMem The address of the guest memory.
8846 * @param fAccess How the memory is being accessed. The
8847 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8848 * how to map the memory, while the
8849 * IEM_ACCESS_WHAT_XXX bit is used when raising
8850 * exceptions.
8851 */
8852IEM_STATIC VBOXSTRICTRC
8853iemMemMap(PVMCPU pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8854{
8855 /*
8856 * Check the input and figure out which mapping entry to use.
8857 */
8858 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8859 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8860 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8861
8862 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8863 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8864 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8865 {
8866 iMemMap = iemMemMapFindFree(pVCpu);
8867 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8868 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8869 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8870 pVCpu->iem.s.aMemMappings[2].fAccess),
8871 VERR_IEM_IPE_9);
8872 }
8873
8874 /*
8875 * Map the memory, checking that we can actually access it. If something
8876 * slightly complicated happens, fall back on bounce buffering.
8877 */
8878 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8879 if (rcStrict != VINF_SUCCESS)
8880 return rcStrict;
8881
8882 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8883 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8884
8885 RTGCPHYS GCPhysFirst;
8886 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8887 if (rcStrict != VINF_SUCCESS)
8888 return rcStrict;
8889
8890 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8891 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8892 if (fAccess & IEM_ACCESS_TYPE_READ)
8893 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8894
8895 void *pvMem;
8896 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8897 if (rcStrict != VINF_SUCCESS)
8898 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8899
8900 /*
8901 * Fill in the mapping table entry.
8902 */
8903 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8904 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8905 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8906 pVCpu->iem.s.cActiveMappings++;
8907
8908 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8909 *ppvMem = pvMem;
8910
8911#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
8912 /*
8913 * Check if this is an APIC-access and whether it needs to be virtualized.
8914 */
8915 if ( CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu))
8916 && IEM_VMX_IS_PROCCTLS2_SET(pVCpu, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
8917 {
8918 RTGCPHYS const GCPhysMemAccessBase = GCPhysFirst & ~(RTGCPHYS)PAGE_OFFSET_MASK;
8919 RTGCPHYS const GCPhysApicAccessBase = CPUMGetGuestVmxApicAccessPageAddr(pVCpu, IEM_GET_CTX(pVCpu))
8920 & ~(RTGCPHYS)PAGE_OFFSET_MASK;
8921 if (GCPhysMemAccessBase == GCPhysApicAccessBase)
8922 {
8923 Assert(pvMem);
8924 uint16_t const offAccess = GCPhysFirst & (RTGCPHYS)PAGE_OFFSET_MASK;
8925 return iemVmxVirtApicAccessMem(pVCpu, offAccess, cbMem, pvMem, fAccess);
8926 }
8927 }
8928#endif
8929
8930 return VINF_SUCCESS;
8931}
8932
8933
8934/**
8935 * Commits the guest memory if bounce buffered and unmaps it.
8936 *
8937 * @returns Strict VBox status code.
8938 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8939 * @param pvMem The mapping.
8940 * @param fAccess The kind of access.
8941 */
8942IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
8943{
8944 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8945 AssertReturn(iMemMap >= 0, iMemMap);
8946
8947 /* If it's bounce buffered, we may need to write back the buffer. */
8948 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8949 {
8950 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8951 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8952 }
8953 /* Otherwise unlock it. */
8954 else
8955 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8956
8957 /* Free the entry. */
8958 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8959 Assert(pVCpu->iem.s.cActiveMappings != 0);
8960 pVCpu->iem.s.cActiveMappings--;
8961 return VINF_SUCCESS;
8962}
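
/*
 * Usage sketch for the mapping API above: the store counterpart to the data
 * fetch helpers further down. This mirrors how a 32-bit store would use
 * iemMemMap and iemMemCommitAndUnmap; it is an illustration, not the actual
 * IEM store implementation.
 */
DECLINLINE(VBOXSTRICTRC) iemMemExampleStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
{
    uint32_t *pu32Dst;
    /* Map exactly four bytes for writing; segmentation, paging and bounce
       buffering are all dealt with inside iemMemMap. */
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rc == VINF_SUCCESS)
    {
        *pu32Dst = u32Value;
        /* Committing writes back any bounce buffer and releases the page lock. */
        rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
    }
    return rc;
}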
8963
8964#ifdef IEM_WITH_SETJMP
8965
8966/**
8967 * Maps the specified guest memory for the given kind of access, longjmp on
8968 * error.
8969 *
8970 * This may be using bounce buffering of the memory if it's crossing a page
8971 * boundary or if there is an access handler installed for any of it. Because
8972 * of lock prefix guarantees, we're in for some extra clutter when this
8973 * happens.
8974 *
8975 * This may raise a \#GP, \#SS, \#PF or \#AC.
8976 *
8977 * @returns Pointer to the mapped memory.
8978 *
8979 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8980 * @param cbMem The number of bytes to map. This is usually 1,
8981 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8982 * string operations it can be up to a page.
8983 * @param iSegReg The index of the segment register to use for
8984 * this access. The base and limits are checked.
8985 * Use UINT8_MAX to indicate that no segmentation
8986 * is required (for IDT, GDT and LDT accesses).
8987 * @param GCPtrMem The address of the guest memory.
8988 * @param fAccess How the memory is being accessed. The
8989 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8990 * how to map the memory, while the
8991 * IEM_ACCESS_WHAT_XXX bit is used when raising
8992 * exceptions.
8993 */
8994IEM_STATIC void *iemMemMapJmp(PVMCPU pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8995{
8996 /*
8997 * Check the input and figure out which mapping entry to use.
8998 */
8999 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
9000 Assert(!(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
9001 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
9002
9003 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
9004 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
9005 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
9006 {
9007 iMemMap = iemMemMapFindFree(pVCpu);
9008 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
9009 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
9010 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
9011 pVCpu->iem.s.aMemMappings[2].fAccess),
9012 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
9013 }
9014
9015 /*
9016 * Map the memory, checking that we can actually access it. If something
9017 * slightly complicated happens, fall back on bounce buffering.
9018 */
9019 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
9020 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9021 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9022
9023 /* Crossing a page boundary? */
9024 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
9025 { /* No (likely). */ }
9026 else
9027 {
9028 void *pvMem;
9029 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
9030 if (rcStrict == VINF_SUCCESS)
9031 return pvMem;
9032 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9033 }
9034
9035 RTGCPHYS GCPhysFirst;
9036 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
9037 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
9038 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9039
9040 if (fAccess & IEM_ACCESS_TYPE_WRITE)
9041 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9042 if (fAccess & IEM_ACCESS_TYPE_READ)
9043 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
9044
9045 void *pvMem;
9046 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9047 if (rcStrict == VINF_SUCCESS)
9048 { /* likely */ }
9049 else
9050 {
9051 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
9052 if (rcStrict == VINF_SUCCESS)
9053 return pvMem;
9054 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9055 }
9056
9057 /*
9058 * Fill in the mapping table entry.
9059 */
9060 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
9061 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
9062 pVCpu->iem.s.iNextMapping = iMemMap + 1;
9063 pVCpu->iem.s.cActiveMappings++;
9064
9065 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
9066 return pvMem;
9067}
9068
9069
9070/**
9071 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
9072 *
9073 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9074 * @param pvMem The mapping.
9075 * @param fAccess The kind of access.
9076 */
9077IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9078{
9079 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9080 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
9081
9082 /* If it's bounce buffered, we may need to write back the buffer. */
9083 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9084 {
9085 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9086 {
9087 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
9088 if (rcStrict == VINF_SUCCESS)
9089 return;
9090 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9091 }
9092 }
9093 /* Otherwise unlock it. */
9094 else
9095 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9096
9097 /* Free the entry. */
9098 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9099 Assert(pVCpu->iem.s.cActiveMappings != 0);
9100 pVCpu->iem.s.cActiveMappings--;
9101}
9102
9103#endif /* IEM_WITH_SETJMP */
9104
9105#ifndef IN_RING3
9106/**
9107 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9108 * buffer part shows trouble it will be postponed to ring-3 (sets FF and stuff).
9109 *
9110 * Allows the instruction to be completed and retired, while the IEM user will
9111 * return to ring-3 immediately afterwards and do the postponed writes there.
9112 *
9113 * @returns VBox status code (no strict statuses). Caller must check
9114 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9115 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9116 * @param pvMem The mapping.
9117 * @param fAccess The kind of access.
9118 */
9119IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPU pVCpu, void *pvMem, uint32_t fAccess)
9120{
9121 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9122 AssertReturn(iMemMap >= 0, iMemMap);
9123
9124 /* If it's bounce buffered, we may need to write back the buffer. */
9125 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9126 {
9127 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9128 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9129 }
9130 /* Otherwise unlock it. */
9131 else
9132 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9133
9134 /* Free the entry. */
9135 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9136 Assert(pVCpu->iem.s.cActiveMappings != 0);
9137 pVCpu->iem.s.cActiveMappings--;
9138 return VINF_SUCCESS;
9139}
9140#endif
9141
9142
9143/**
9144 * Rolls back mappings, releasing page locks and such.
9145 *
9146 * The caller shall only call this after checking cActiveMappings.
9147 *
9149 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9150 */
9151IEM_STATIC void iemMemRollback(PVMCPU pVCpu)
9152{
9153 Assert(pVCpu->iem.s.cActiveMappings > 0);
9154
9155 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9156 while (iMemMap-- > 0)
9157 {
9158 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9159 if (fAccess != IEM_ACCESS_INVALID)
9160 {
9161 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9162 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9163 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9164 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9165 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9166 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9167 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9168 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9169 pVCpu->iem.s.cActiveMappings--;
9170 }
9171 }
9172}
9173
9174
9175/**
9176 * Fetches a data byte.
9177 *
9178 * @returns Strict VBox status code.
9179 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9180 * @param pu8Dst Where to return the byte.
9181 * @param iSegReg The index of the segment register to use for
9182 * this access. The base and limits are checked.
9183 * @param GCPtrMem The address of the guest memory.
9184 */
9185IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPU pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9186{
9187 /* The lazy approach for now... */
9188 uint8_t const *pu8Src;
9189 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9190 if (rc == VINF_SUCCESS)
9191 {
9192 *pu8Dst = *pu8Src;
9193 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9194 }
9195 return rc;
9196}
9197
9198
9199#ifdef IEM_WITH_SETJMP
9200/**
9201 * Fetches a data byte, longjmp on error.
9202 *
9203 * @returns The byte.
9204 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9205 * @param iSegReg The index of the segment register to use for
9206 * this access. The base and limits are checked.
9207 * @param GCPtrMem The address of the guest memory.
9208 */
9209DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9210{
9211 /* The lazy approach for now... */
9212 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9213 uint8_t const bRet = *pu8Src;
9214 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9215 return bRet;
9216}
9217#endif /* IEM_WITH_SETJMP */
9218
9219
9220/**
9221 * Fetches a data word.
9222 *
9223 * @returns Strict VBox status code.
9224 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9225 * @param pu16Dst Where to return the word.
9226 * @param iSegReg The index of the segment register to use for
9227 * this access. The base and limits are checked.
9228 * @param GCPtrMem The address of the guest memory.
9229 */
9230IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9231{
9232 /* The lazy approach for now... */
9233 uint16_t const *pu16Src;
9234 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9235 if (rc == VINF_SUCCESS)
9236 {
9237 *pu16Dst = *pu16Src;
9238 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9239 }
9240 return rc;
9241}
9242
9243
9244#ifdef IEM_WITH_SETJMP
9245/**
9246 * Fetches a data word, longjmp on error.
9247 *
9248 * @returns The word.
9249 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9250 * @param iSegReg The index of the segment register to use for
9251 * this access. The base and limits are checked.
9252 * @param GCPtrMem The address of the guest memory.
9253 */
9254DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9255{
9256 /* The lazy approach for now... */
9257 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9258 uint16_t const u16Ret = *pu16Src;
9259 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9260 return u16Ret;
9261}
9262#endif
9263
9264
9265/**
9266 * Fetches a data dword.
9267 *
9268 * @returns Strict VBox status code.
9269 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9270 * @param pu32Dst Where to return the dword.
9271 * @param iSegReg The index of the segment register to use for
9272 * this access. The base and limits are checked.
9273 * @param GCPtrMem The address of the guest memory.
9274 */
9275IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9276{
9277 /* The lazy approach for now... */
9278 uint32_t const *pu32Src;
9279 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9280 if (rc == VINF_SUCCESS)
9281 {
9282 *pu32Dst = *pu32Src;
9283 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9284 }
9285 return rc;
9286}
9287
9288
9289#ifdef IEM_WITH_SETJMP
9290
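/**
 * Applies the segment limit, base and attribute checks to a data read,
 * converting the segment-relative address into a flat address, longjmp on
 * error.
 *
 * In 64-bit mode only the FS and GS bases are applied and the address is
 * checked for canonicality; in 16-bit and 32-bit mode the expand-up and
 * expand-down limit checks are performed (e.g. an expand-down data segment
 * with a limit of 0xfff and a 32-bit default size accepts offsets 0x1000
 * through 0xffffffff).
 *
 * @returns The flat address to use for the access.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iSegReg The index of the segment register to use for
 * this access. The base and limits are checked.
 * @param cbMem The number of bytes accessed.
 * @param GCPtrMem The segment-relative address of the guest memory.
 */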
9291IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9292{
9293 Assert(cbMem >= 1);
9294 Assert(iSegReg < X86_SREG_COUNT);
9295
9296 /*
9297 * 64-bit mode is simpler.
9298 */
9299 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9300 {
9301 if (iSegReg >= X86_SREG_FS)
9302 {
9303 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9304 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9305 GCPtrMem += pSel->u64Base;
9306 }
9307
9308 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9309 return GCPtrMem;
9310 }
9311 /*
9312 * 16-bit and 32-bit segmentation.
9313 */
9314 else
9315 {
9316 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9317 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9318 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9319 == X86DESCATTR_P /* data, expand up */
9320 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9321 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9322 {
9323 /* expand up */
9324 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9325 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9326 && GCPtrLast32 > (uint32_t)GCPtrMem))
9327 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9328 }
9329 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9330 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9331 {
9332 /* expand down */
9333 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9334 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9335 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9336 && GCPtrLast32 > (uint32_t)GCPtrMem))
9337 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9338 }
9339 else
9340 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9341 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9342 }
9343 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9344}
9345
9346
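/**
 * Applies the segment limit, base and attribute checks to a data write,
 * converting the segment-relative address into a flat address, longjmp on
 * error.
 *
 * Mirrors iemMemApplySegmentToReadJmp above, except that only writable data
 * segments pass the attribute check.
 *
 * @returns The flat address to use for the access.
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iSegReg The index of the segment register to use for
 * this access. The base and limits are checked.
 * @param cbMem The number of bytes accessed.
 * @param GCPtrMem The segment-relative address of the guest memory.
 */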
9347IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPU pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9348{
9349 Assert(cbMem >= 1);
9350 Assert(iSegReg < X86_SREG_COUNT);
9351
9352 /*
9353 * 64-bit mode is simpler.
9354 */
9355 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9356 {
9357 if (iSegReg >= X86_SREG_FS)
9358 {
9359 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9360 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9361 GCPtrMem += pSel->u64Base;
9362 }
9363
9364 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9365 return GCPtrMem;
9366 }
9367 /*
9368 * 16-bit and 32-bit segmentation.
9369 */
9370 else
9371 {
9372 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9373 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9374 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9375 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9376 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9377 {
9378 /* expand up */
9379 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9380 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9381 && GCPtrLast32 > (uint32_t)GCPtrMem))
9382 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9383 }
9384 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9385 {
9386 /* expand down */
9387 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9388 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9389 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9390 && GCPtrLast32 > (uint32_t)GCPtrMem))
9391 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9392 }
9393 else
9394 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9395 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9396 }
9397 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9398}
9399
9400
9401/**
9402 * Fetches a data dword, longjmp on error, fallback/safe version.
9403 *
9404 * @returns The dword.
9405 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9406 * @param iSegReg The index of the segment register to use for
9407 * this access. The base and limits are checked.
9408 * @param GCPtrMem The address of the guest memory.
9409 */
9410IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9411{
9412 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9413 uint32_t const u32Ret = *pu32Src;
9414 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9415 return u32Ret;
9416}
9417
9418
9419/**
9420 * Fetches a data dword, longjmp on error.
9421 *
9422 * @returns The dword.
9423 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9424 * @param iSegReg The index of the segment register to use for
9425 * this access. The base and limits are checked.
9426 * @param GCPtrMem The address of the guest memory.
9427 */
9428DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9429{
9430# ifdef IEM_WITH_DATA_TLB
9431 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9432 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9433 {
9434 /// @todo more later.
9435 }
9436
9437 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9438# else
9439 /* The lazy approach. */
9440 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9441 uint32_t const u32Ret = *pu32Src;
9442 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9443 return u32Ret;
9444# endif
9445}
9446#endif
9447
9448
9449#ifdef SOME_UNUSED_FUNCTION
9450/**
9451 * Fetches a data dword and sign extends it to a qword.
9452 *
9453 * @returns Strict VBox status code.
9454 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9455 * @param pu64Dst Where to return the sign extended value.
9456 * @param iSegReg The index of the segment register to use for
9457 * this access. The base and limits are checked.
9458 * @param GCPtrMem The address of the guest memory.
9459 */
9460IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9461{
9462 /* The lazy approach for now... */
9463 int32_t const *pi32Src;
9464 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9465 if (rc == VINF_SUCCESS)
9466 {
9467 *pu64Dst = *pi32Src;
9468 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9469 }
9470#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9471 else
9472 *pu64Dst = 0;
9473#endif
9474 return rc;
9475}
9476#endif
9477
9478
9479/**
9480 * Fetches a data qword.
9481 *
9482 * @returns Strict VBox status code.
9483 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9484 * @param pu64Dst Where to return the qword.
9485 * @param iSegReg The index of the segment register to use for
9486 * this access. The base and limits are checked.
9487 * @param GCPtrMem The address of the guest memory.
9488 */
9489IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9490{
9491 /* The lazy approach for now... */
9492 uint64_t const *pu64Src;
9493 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9494 if (rc == VINF_SUCCESS)
9495 {
9496 *pu64Dst = *pu64Src;
9497 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9498 }
9499 return rc;
9500}
9501
9502
9503#ifdef IEM_WITH_SETJMP
9504/**
9505 * Fetches a data qword, longjmp on error.
9506 *
9507 * @returns The qword.
9508 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9509 * @param iSegReg The index of the segment register to use for
9510 * this access. The base and limits are checked.
9511 * @param GCPtrMem The address of the guest memory.
9512 */
9513DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9514{
9515 /* The lazy approach for now... */
9516 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9517 uint64_t const u64Ret = *pu64Src;
9518 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9519 return u64Ret;
9520}
9521#endif
9522
9523
9524/**
9525 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9526 *
9527 * @returns Strict VBox status code.
9528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9529 * @param pu64Dst Where to return the qword.
9530 * @param iSegReg The index of the segment register to use for
9531 * this access. The base and limits are checked.
9532 * @param GCPtrMem The address of the guest memory.
9533 */
9534IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9535{
9536 /* The lazy approach for now... */
9537 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9538 if (RT_UNLIKELY(GCPtrMem & 15))
9539 return iemRaiseGeneralProtectionFault0(pVCpu);
9540
9541 uint64_t const *pu64Src;
9542 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9543 if (rc == VINF_SUCCESS)
9544 {
9545 *pu64Dst = *pu64Src;
9546 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9547 }
9548 return rc;
9549}
9550
9551
9552#ifdef IEM_WITH_SETJMP
9553/**
9554 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9555 *
9556 * @returns The qword.
9557 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9558 * @param iSegReg The index of the segment register to use for
9559 * this access. The base and limits are checked.
9560 * @param GCPtrMem The address of the guest memory.
9561 */
9562DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9563{
9564 /* The lazy approach for now... */
9565 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9566 if (RT_LIKELY(!(GCPtrMem & 15)))
9567 {
9568 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9569 uint64_t const u64Ret = *pu64Src;
9570 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9571 return u64Ret;
9572 }
9573
9574 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9575 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9576}
9577#endif
9578
9579
9580/**
9581 * Fetches a data tword.
9582 *
9583 * @returns Strict VBox status code.
9584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9585 * @param pr80Dst Where to return the tword.
9586 * @param iSegReg The index of the segment register to use for
9587 * this access. The base and limits are checked.
9588 * @param GCPtrMem The address of the guest memory.
9589 */
9590IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9591{
9592 /* The lazy approach for now... */
9593 PCRTFLOAT80U pr80Src;
9594 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9595 if (rc == VINF_SUCCESS)
9596 {
9597 *pr80Dst = *pr80Src;
9598 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9599 }
9600 return rc;
9601}
9602
9603
9604#ifdef IEM_WITH_SETJMP
9605/**
9606 * Fetches a data tword, longjmp on error.
9607 *
9608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9609 * @param pr80Dst Where to return the tword.
9610 * @param iSegReg The index of the segment register to use for
9611 * this access. The base and limits are checked.
9612 * @param GCPtrMem The address of the guest memory.
9613 */
9614DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPU pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9615{
9616 /* The lazy approach for now... */
9617 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9618 *pr80Dst = *pr80Src;
9619 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9620}
9621#endif
9622
9623
9624/**
9625 * Fetches a data dqword (double qword), generally SSE related.
9626 *
9627 * @returns Strict VBox status code.
9628 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9629 * @param pu128Dst Where to return the dqword.
9630 * @param iSegReg The index of the segment register to use for
9631 * this access. The base and limits are checked.
9632 * @param GCPtrMem The address of the guest memory.
9633 */
9634IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9635{
9636 /* The lazy approach for now... */
9637 PCRTUINT128U pu128Src;
9638 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9639 if (rc == VINF_SUCCESS)
9640 {
9641 pu128Dst->au64[0] = pu128Src->au64[0];
9642 pu128Dst->au64[1] = pu128Src->au64[1];
9643 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9644 }
9645 return rc;
9646}
9647
9648
9649#ifdef IEM_WITH_SETJMP
9650/**
9651 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9652 *
9653 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9654 * @param pu128Dst Where to return the dqword.
9655 * @param iSegReg The index of the segment register to use for
9656 * this access. The base and limits are checked.
9657 * @param GCPtrMem The address of the guest memory.
9658 */
9659IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9660{
9661 /* The lazy approach for now... */
9662 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9663 pu128Dst->au64[0] = pu128Src->au64[0];
9664 pu128Dst->au64[1] = pu128Src->au64[1];
9665 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9666}
9667#endif
9668
9669
9670/**
9671 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9672 * related.
9673 *
9674 * Raises \#GP(0) if not aligned.
9675 *
9676 * @returns Strict VBox status code.
9677 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9678 * @param pu128Dst Where to return the dqword.
9679 * @param iSegReg The index of the segment register to use for
9680 * this access. The base and limits are checked.
9681 * @param GCPtrMem The address of the guest memory.
9682 */
9683IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9684{
9685 /* The lazy approach for now... */
9686 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9687 if ( (GCPtrMem & 15)
9688 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9689 return iemRaiseGeneralProtectionFault0(pVCpu);
9690
9691 PCRTUINT128U pu128Src;
9692 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9693 if (rc == VINF_SUCCESS)
9694 {
9695 pu128Dst->au64[0] = pu128Src->au64[0];
9696 pu128Dst->au64[1] = pu128Src->au64[1];
9697 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9698 }
9699 return rc;
9700}
9701
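/*
 * Note on the X86_MXCSR_MM test above and in the setjmp variant below: this is
 * AMD's misaligned SSE mode bit.  When the guest has it set, misaligned
 * 16-byte SSE accesses are allowed and the \#GP(0) alignment check is skipped.
 */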
9702
9703#ifdef IEM_WITH_SETJMP
9704/**
9705 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9706 * related, longjmp on error.
9707 *
9708 * Raises \#GP(0) if not aligned.
9709 *
9710 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9711 * @param pu128Dst Where to return the dqword.
9712 * @param iSegReg The index of the segment register to use for
9713 * this access. The base and limits are checked.
9714 * @param GCPtrMem The address of the guest memory.
9715 */
9716DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPU pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9717{
9718 /* The lazy approach for now... */
9719 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9720 if ( (GCPtrMem & 15) == 0
9721 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9722 {
9723 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9724 pu128Dst->au64[0] = pu128Src->au64[0];
9725 pu128Dst->au64[1] = pu128Src->au64[1];
9726 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9727 return;
9728 }
9729
9730 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9731 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9732}
9733#endif
9734
9735
9736/**
9737 * Fetches a data oword (octo word), generally AVX related.
9738 *
9739 * @returns Strict VBox status code.
9740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9741 * @param pu256Dst Where to return the oword.
9742 * @param iSegReg The index of the segment register to use for
9743 * this access. The base and limits are checked.
9744 * @param GCPtrMem The address of the guest memory.
9745 */
9746IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9747{
9748 /* The lazy approach for now... */
9749 PCRTUINT256U pu256Src;
9750 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9751 if (rc == VINF_SUCCESS)
9752 {
9753 pu256Dst->au64[0] = pu256Src->au64[0];
9754 pu256Dst->au64[1] = pu256Src->au64[1];
9755 pu256Dst->au64[2] = pu256Src->au64[2];
9756 pu256Dst->au64[3] = pu256Src->au64[3];
9757 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9758 }
9759 return rc;
9760}
9761
9762
9763#ifdef IEM_WITH_SETJMP
9764/**
9765 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9766 *
9767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9768 * @param pu256Dst Where to return the oword.
9769 * @param iSegReg The index of the segment register to use for
9770 * this access. The base and limits are checked.
9771 * @param GCPtrMem The address of the guest memory.
9772 */
9773IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9774{
9775 /* The lazy approach for now... */
9776 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9777 pu256Dst->au64[0] = pu256Src->au64[0];
9778 pu256Dst->au64[1] = pu256Src->au64[1];
9779 pu256Dst->au64[2] = pu256Src->au64[2];
9780 pu256Dst->au64[3] = pu256Src->au64[3];
9781 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9782}
9783#endif
9784
9785
9786/**
9787 * Fetches a data oword (octo word) at an aligned address, generally AVX
9788 * related.
9789 *
9790 * Raises \#GP(0) if not aligned.
9791 *
9792 * @returns Strict VBox status code.
9793 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9794 * @param pu256Dst Where to return the oword.
9795 * @param iSegReg The index of the segment register to use for
9796 * this access. The base and limits are checked.
9797 * @param GCPtrMem The address of the guest memory.
9798 */
9799IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9800{
9801 /* The lazy approach for now... */
9802 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9803 if (GCPtrMem & 31)
9804 return iemRaiseGeneralProtectionFault0(pVCpu);
9805
9806 PCRTUINT256U pu256Src;
9807 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9808 if (rc == VINF_SUCCESS)
9809 {
9810 pu256Dst->au64[0] = pu256Src->au64[0];
9811 pu256Dst->au64[1] = pu256Src->au64[1];
9812 pu256Dst->au64[2] = pu256Src->au64[2];
9813 pu256Dst->au64[3] = pu256Src->au64[3];
9814 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9815 }
9816 return rc;
9817}
9818
9819
9820#ifdef IEM_WITH_SETJMP
9821/**
9822 * Fetches a data oword (octo word) at an aligned address, generally AVX
9823 * related, longjmp on error.
9824 *
9825 * Raises \#GP(0) if not aligned.
9826 *
9827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9828 * @param pu256Dst Where to return the oword.
9829 * @param iSegReg The index of the segment register to use for
9830 * this access. The base and limits are checked.
9831 * @param GCPtrMem The address of the guest memory.
9832 */
9833DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPU pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9834{
9835 /* The lazy approach for now... */
9836 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9837 if ((GCPtrMem & 31) == 0)
9838 {
9839 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9840 pu256Dst->au64[0] = pu256Src->au64[0];
9841 pu256Dst->au64[1] = pu256Src->au64[1];
9842 pu256Dst->au64[2] = pu256Src->au64[2];
9843 pu256Dst->au64[3] = pu256Src->au64[3];
9844 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9845 return;
9846 }
9847
9848 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9849 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9850}
9851#endif
9852
9853
9854
9855/**
9856 * Fetches a descriptor register (lgdt, lidt).
9857 *
9858 * @returns Strict VBox status code.
9859 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9860 * @param pcbLimit Where to return the limit.
9861 * @param pGCPtrBase Where to return the base.
9862 * @param iSegReg The index of the segment register to use for
9863 * this access. The base and limits are checked.
9864 * @param GCPtrMem The address of the guest memory.
9865 * @param enmOpSize The effective operand size.
9866 */
9867IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPU pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9868 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9869{
9870 /*
9871 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9872 * little special:
9873 * - The two reads are done separately.
9874 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9875 * - We suspect the 386 to actually commit the limit before the base in
9876 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9877 * don't try to emulate this eccentric behavior, because it's not well
9878 * enough understood and rather hard to trigger.
9879 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9880 */
9881 VBOXSTRICTRC rcStrict;
9882 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9883 {
9884 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9885 if (rcStrict == VINF_SUCCESS)
9886 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9887 }
9888 else
9889 {
9890 uint32_t uTmp = 0; /* (Silences Visual C++'s "potentially used uninitialized" warning.) */
9891 if (enmOpSize == IEMMODE_32BIT)
9892 {
9893 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9894 {
9895 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9896 if (rcStrict == VINF_SUCCESS)
9897 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9898 }
9899 else
9900 {
9901 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9902 if (rcStrict == VINF_SUCCESS)
9903 {
9904 *pcbLimit = (uint16_t)uTmp;
9905 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9906 }
9907 }
9908 if (rcStrict == VINF_SUCCESS)
9909 *pGCPtrBase = uTmp;
9910 }
9911 else
9912 {
9913 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9914 if (rcStrict == VINF_SUCCESS)
9915 {
9916 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9917 if (rcStrict == VINF_SUCCESS)
9918 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9919 }
9920 }
9921 }
9922 return rcStrict;
9923}
9924
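/*
 * Usage sketch for the quirks described above (illustrative only; the real
 * LGDT/LIDT workers live elsewhere and the variable names here are made up):
 *
 *     uint16_t cbLimit;
 *     RTGCPTR  GCPtrBase;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase,
 *                                                 iEffSeg, GCPtrEffSrc, enmEffOpSize);
 *     // With a 16-bit operand size the function has already masked the base
 *     // down to 24 bits for the caller.
 */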
9925
9926
9927/**
9928 * Stores a data byte.
9929 *
9930 * @returns Strict VBox status code.
9931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9932 * @param iSegReg The index of the segment register to use for
9933 * this access. The base and limits are checked.
9934 * @param GCPtrMem The address of the guest memory.
9935 * @param u8Value The value to store.
9936 */
9937IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9938{
9939 /* The lazy approach for now... */
9940 uint8_t *pu8Dst;
9941 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9942 if (rc == VINF_SUCCESS)
9943 {
9944 *pu8Dst = u8Value;
9945 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9946 }
9947 return rc;
9948}
9949
9950
9951#ifdef IEM_WITH_SETJMP
9952/**
9953 * Stores a data byte, longjmp on error.
9954 *
9955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9956 * @param iSegReg The index of the segment register to use for
9957 * this access. The base and limits are checked.
9958 * @param GCPtrMem The address of the guest memory.
9959 * @param u8Value The value to store.
9960 */
9961IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9962{
9963 /* The lazy approach for now... */
9964 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9965 *pu8Dst = u8Value;
9966 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9967}
9968#endif
9969
9970
9971/**
9972 * Stores a data word.
9973 *
9974 * @returns Strict VBox status code.
9975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9976 * @param iSegReg The index of the segment register to use for
9977 * this access. The base and limits are checked.
9978 * @param GCPtrMem The address of the guest memory.
9979 * @param u16Value The value to store.
9980 */
9981IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9982{
9983 /* The lazy approach for now... */
9984 uint16_t *pu16Dst;
9985 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9986 if (rc == VINF_SUCCESS)
9987 {
9988 *pu16Dst = u16Value;
9989 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9990 }
9991 return rc;
9992}
9993
9994
9995#ifdef IEM_WITH_SETJMP
9996/**
9997 * Stores a data word, longjmp on error.
9998 *
9999 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10000 * @param iSegReg The index of the segment register to use for
10001 * this access. The base and limits are checked.
10002 * @param GCPtrMem The address of the guest memory.
10003 * @param u16Value The value to store.
10004 */
10005IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
10006{
10007 /* The lazy approach for now... */
10008 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10009 *pu16Dst = u16Value;
10010 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
10011}
10012#endif
10013
10014
10015/**
10016 * Stores a data dword.
10017 *
10018 * @returns Strict VBox status code.
10019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10020 * @param iSegReg The index of the segment register to use for
10021 * this access. The base and limits are checked.
10022 * @param GCPtrMem The address of the guest memory.
10023 * @param u32Value The value to store.
10024 */
10025IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10026{
10027 /* The lazy approach for now... */
10028 uint32_t *pu32Dst;
10029 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10030 if (rc == VINF_SUCCESS)
10031 {
10032 *pu32Dst = u32Value;
10033 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10034 }
10035 return rc;
10036}
10037
10038
10039#ifdef IEM_WITH_SETJMP
10040/**
10041 * Stores a data dword, longjmp on error.
10042 *
10044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10045 * @param iSegReg The index of the segment register to use for
10046 * this access. The base and limits are checked.
10047 * @param GCPtrMem The address of the guest memory.
10048 * @param u32Value The value to store.
10049 */
10050IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
10051{
10052 /* The lazy approach for now... */
10053 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10054 *pu32Dst = u32Value;
10055 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
10056}
10057#endif
10058
10059
10060/**
10061 * Stores a data qword.
10062 *
10063 * @returns Strict VBox status code.
10064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10065 * @param iSegReg The index of the segment register to use for
10066 * this access. The base and limits are checked.
10067 * @param GCPtrMem The address of the guest memory.
10068 * @param u64Value The value to store.
10069 */
10070IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10071{
10072 /* The lazy approach for now... */
10073 uint64_t *pu64Dst;
10074 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10075 if (rc == VINF_SUCCESS)
10076 {
10077 *pu64Dst = u64Value;
10078 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10079 }
10080 return rc;
10081}
10082
10083
10084#ifdef IEM_WITH_SETJMP
10085/**
10086 * Stores a data qword, longjmp on error.
10087 *
10088 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10089 * @param iSegReg The index of the segment register to use for
10090 * this access. The base and limits are checked.
10091 * @param GCPtrMem The address of the guest memory.
10092 * @param u64Value The value to store.
10093 */
10094IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10095{
10096 /* The lazy approach for now... */
10097 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10098 *pu64Dst = u64Value;
10099 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10100}
10101#endif
10102
10103
10104/**
10105 * Stores a data dqword.
10106 *
10107 * @returns Strict VBox status code.
10108 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10109 * @param iSegReg The index of the segment register to use for
10110 * this access. The base and limits are checked.
10111 * @param GCPtrMem The address of the guest memory.
10112 * @param u128Value The value to store.
10113 */
10114IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10115{
10116 /* The lazy approach for now... */
10117 PRTUINT128U pu128Dst;
10118 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10119 if (rc == VINF_SUCCESS)
10120 {
10121 pu128Dst->au64[0] = u128Value.au64[0];
10122 pu128Dst->au64[1] = u128Value.au64[1];
10123 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10124 }
10125 return rc;
10126}
10127
10128
10129#ifdef IEM_WITH_SETJMP
10130/**
10131 * Stores a data dqword, longjmp on error.
10132 *
10133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10134 * @param iSegReg The index of the segment register to use for
10135 * this access. The base and limits are checked.
10136 * @param GCPtrMem The address of the guest memory.
10137 * @param u128Value The value to store.
10138 */
10139IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10140{
10141 /* The lazy approach for now... */
10142 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10143 pu128Dst->au64[0] = u128Value.au64[0];
10144 pu128Dst->au64[1] = u128Value.au64[1];
10145 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10146}
10147#endif
10148
10149
10150/**
10151 * Stores a data dqword, SSE aligned.
10152 *
10153 * @returns Strict VBox status code.
10154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10155 * @param iSegReg The index of the segment register to use for
10156 * this access. The base and limits are checked.
10157 * @param GCPtrMem The address of the guest memory.
10158 * @param u128Value The value to store.
10159 */
10160IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10161{
10162 /* The lazy approach for now... */
10163 if ( (GCPtrMem & 15)
10164 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10165 return iemRaiseGeneralProtectionFault0(pVCpu);
10166
10167 PRTUINT128U pu128Dst;
10168 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10169 if (rc == VINF_SUCCESS)
10170 {
10171 pu128Dst->au64[0] = u128Value.au64[0];
10172 pu128Dst->au64[1] = u128Value.au64[1];
10173 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10174 }
10175 return rc;
10176}
10177
10178
10179#ifdef IEM_WITH_SETJMP
10180/**
10181 * Stores a data dqword, SSE aligned, longjmp on error.
10182 *
10184 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10185 * @param iSegReg The index of the segment register to use for
10186 * this access. The base and limits are checked.
10187 * @param GCPtrMem The address of the guest memory.
10188 * @param u128Value The value to store.
10189 */
10190DECL_NO_INLINE(IEM_STATIC, void)
10191iemMemStoreDataU128AlignedSseJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10192{
10193 /* The lazy approach for now... */
10194 if ( (GCPtrMem & 15) == 0
10195 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10196 {
10197 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10198 pu128Dst->au64[0] = u128Value.au64[0];
10199 pu128Dst->au64[1] = u128Value.au64[1];
10200 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10201 return;
10202 }
10203
10204 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10205 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10206}
10207#endif
10208
10209
10210/**
10211 * Stores a data oword (octo word).
10212 *
10213 * @returns Strict VBox status code.
10214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10215 * @param iSegReg The index of the segment register to use for
10216 * this access. The base and limits are checked.
10217 * @param GCPtrMem The address of the guest memory.
10218 * @param pu256Value Pointer to the value to store.
10219 */
10220IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10221{
10222 /* The lazy approach for now... */
10223 PRTUINT256U pu256Dst;
10224 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10225 if (rc == VINF_SUCCESS)
10226 {
10227 pu256Dst->au64[0] = pu256Value->au64[0];
10228 pu256Dst->au64[1] = pu256Value->au64[1];
10229 pu256Dst->au64[2] = pu256Value->au64[2];
10230 pu256Dst->au64[3] = pu256Value->au64[3];
10231 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10232 }
10233 return rc;
10234}
10235
10236
10237#ifdef IEM_WITH_SETJMP
10238/**
10239 * Stores a data oword (octo word), longjmp on error.
10240 *
10241 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10242 * @param iSegReg The index of the segment register to use for
10243 * this access. The base and limits are checked.
10244 * @param GCPtrMem The address of the guest memory.
10245 * @param pu256Value Pointer to the value to store.
10246 */
10247IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10248{
10249 /* The lazy approach for now... */
10250 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10251 pu256Dst->au64[0] = pu256Value->au64[0];
10252 pu256Dst->au64[1] = pu256Value->au64[1];
10253 pu256Dst->au64[2] = pu256Value->au64[2];
10254 pu256Dst->au64[3] = pu256Value->au64[3];
10255 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10256}
10257#endif
10258
10259
10260/**
10261 * Stores a data oword (octo word), AVX aligned.
10262 *
10263 * @returns Strict VBox status code.
10264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10265 * @param iSegReg The index of the segment register to use for
10266 * this access. The base and limits are checked.
10267 * @param GCPtrMem The address of the guest memory.
10268 * @param pu256Value Pointer to the value to store.
10269 */
10270IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10271{
10272 /* The lazy approach for now... */
10273 if (GCPtrMem & 31)
10274 return iemRaiseGeneralProtectionFault0(pVCpu);
10275
10276 PRTUINT256U pu256Dst;
10277 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10278 if (rc == VINF_SUCCESS)
10279 {
10280 pu256Dst->au64[0] = pu256Value->au64[0];
10281 pu256Dst->au64[1] = pu256Value->au64[1];
10282 pu256Dst->au64[2] = pu256Value->au64[2];
10283 pu256Dst->au64[3] = pu256Value->au64[3];
10284 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10285 }
10286 return rc;
10287}
10288
10289
10290#ifdef IEM_WITH_SETJMP
10291/**
10292 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10293 *
10295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10296 * @param iSegReg The index of the segment register to use for
10297 * this access. The base and limits are checked.
10298 * @param GCPtrMem The address of the guest memory.
10299 * @param pu256Value Pointer to the value to store.
10300 */
10301DECL_NO_INLINE(IEM_STATIC, void)
10302iemMemStoreDataU256AlignedAvxJmp(PVMCPU pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10303{
10304 /* The lazy approach for now... */
10305 if ((GCPtrMem & 31) == 0)
10306 {
10307 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10308 pu256Dst->au64[0] = pu256Value->au64[0];
10309 pu256Dst->au64[1] = pu256Value->au64[1];
10310 pu256Dst->au64[2] = pu256Value->au64[2];
10311 pu256Dst->au64[3] = pu256Value->au64[3];
10312 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10313 return;
10314 }
10315
10316 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10317 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10318}
10319#endif
10320
10321
10322/**
10323 * Stores a descriptor register (sgdt, sidt).
10324 *
10325 * @returns Strict VBox status code.
10326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10327 * @param cbLimit The limit.
10328 * @param GCPtrBase The base address.
10329 * @param iSegReg The index of the segment register to use for
10330 * this access. The base and limits are checked.
10331 * @param GCPtrMem The address of the guest memory.
10332 */
10333IEM_STATIC VBOXSTRICTRC
10334iemMemStoreDataXdtr(PVMCPU pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10335{
10336 /*
10337 * The SIDT and SGDT instructions actually store the data using two
10338 * independent writes. The instructions do not respond to opsize prefixes.
10339 */
10340 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10341 if (rcStrict == VINF_SUCCESS)
10342 {
10343 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10344 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10345 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10346 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10347 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10348 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10349 else
10350 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10351 }
10352 return rcStrict;
10353}
10354
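/*
 * Resulting layout (derived from the code above): the 16-bit limit always goes
 * to GCPtrMem, followed at GCPtrMem+2 by a 32-bit base in 16-bit and 32-bit
 * code, or a 64-bit base in 64-bit code.  On 286-class target CPUs the unused
 * top byte of the 24-bit base is stored as 0xff, which is what the
 * UINT32_C(0xff000000) OR above reproduces.
 */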
10355
10356/**
10357 * Pushes a word onto the stack.
10358 *
10359 * @returns Strict VBox status code.
10360 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10361 * @param u16Value The value to push.
10362 */
10363IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPU pVCpu, uint16_t u16Value)
10364{
10365 /* Decrement the stack pointer. */
10366 uint64_t uNewRsp;
10367 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10368
10369 /* Write the word the lazy way. */
10370 uint16_t *pu16Dst;
10371 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10372 if (rc == VINF_SUCCESS)
10373 {
10374 *pu16Dst = u16Value;
10375 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10376 }
10377
10378 /* Commit the new RSP value unless an access handler made trouble. */
10379 if (rc == VINF_SUCCESS)
10380 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10381
10382 return rc;
10383}
10384
10385
10386/**
10387 * Pushes a dword onto the stack.
10388 *
10389 * @returns Strict VBox status code.
10390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10391 * @param u32Value The value to push.
10392 */
10393IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPU pVCpu, uint32_t u32Value)
10394{
10395 /* Decrement the stack pointer. */
10396 uint64_t uNewRsp;
10397 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10398
10399 /* Write the dword the lazy way. */
10400 uint32_t *pu32Dst;
10401 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10402 if (rc == VINF_SUCCESS)
10403 {
10404 *pu32Dst = u32Value;
10405 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10406 }
10407
10408 /* Commit the new RSP value unless an access handler made trouble. */
10409 if (rc == VINF_SUCCESS)
10410 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10411
10412 return rc;
10413}
10414
10415
10416/**
10417 * Pushes a dword segment register value onto the stack.
10418 *
10419 * @returns Strict VBox status code.
10420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10421 * @param u32Value The value to push.
10422 */
10423IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPU pVCpu, uint32_t u32Value)
10424{
10425 /* Decrement the stack pointer. */
10426 uint64_t uNewRsp;
10427 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10428
10429 /* The Intel docs talk about zero extending the selector register
10430 value. My actual Intel CPU here might be zero extending the value,
10431 but it still only writes the lower word... */
10432 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10433 * happens when crossing an electric page boundary: is the high word checked
10434 * for write accessibility or not? Probably it is. What about segment limits?
10435 * It appears this behavior is also shared with trap error codes.
10436 *
10437 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10438 * ancient hardware when it actually did change. */
10439 uint16_t *pu16Dst;
10440 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10441 if (rc == VINF_SUCCESS)
10442 {
10443 *pu16Dst = (uint16_t)u32Value;
10444 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10445 }
10446
10447 /* Commit the new RSP value unless an access handler made trouble. */
10448 if (rc == VINF_SUCCESS)
10449 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10450
10451 return rc;
10452}
10453
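/*
 * Illustration of the behavior implemented above (as coded here, pending the
 * @todo testcase; the numbers are made up):
 *
 *     // before: ESP = 0x1000, bytes at 0x0ffc..0x0fff = AA BB CC DD
 *     // "push ds" with DS=0x0023 and a 32-bit operand size
 *     // after:  ESP = 0x0ffc, bytes at 0x0ffc..0x0fff = 23 00 CC DD
 *     //         (only the low word is written, the high word is left alone)
 */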
10454
10455/**
10456 * Pushes a qword onto the stack.
10457 *
10458 * @returns Strict VBox status code.
10459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10460 * @param u64Value The value to push.
10461 */
10462IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPU pVCpu, uint64_t u64Value)
10463{
10464 /* Decrement the stack pointer. */
10465 uint64_t uNewRsp;
10466 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10467
10468 /* Write the qword the lazy way. */
10469 uint64_t *pu64Dst;
10470 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10471 if (rc == VINF_SUCCESS)
10472 {
10473 *pu64Dst = u64Value;
10474 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10475 }
10476
10477 /* Commit the new RSP value unless an access handler made trouble. */
10478 if (rc == VINF_SUCCESS)
10479 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10480
10481 return rc;
10482}
10483
10484
10485/**
10486 * Pops a word from the stack.
10487 *
10488 * @returns Strict VBox status code.
10489 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10490 * @param pu16Value Where to store the popped value.
10491 */
10492IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPU pVCpu, uint16_t *pu16Value)
10493{
10494 /* Increment the stack pointer. */
10495 uint64_t uNewRsp;
10496 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10497
10498 /* Read the word the lazy way. */
10499 uint16_t const *pu16Src;
10500 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10501 if (rc == VINF_SUCCESS)
10502 {
10503 *pu16Value = *pu16Src;
10504 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10505
10506 /* Commit the new RSP value. */
10507 if (rc == VINF_SUCCESS)
10508 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10509 }
10510
10511 return rc;
10512}
10513
10514
10515/**
10516 * Pops a dword from the stack.
10517 *
10518 * @returns Strict VBox status code.
10519 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10520 * @param pu32Value Where to store the popped value.
10521 */
10522IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPU pVCpu, uint32_t *pu32Value)
10523{
10524 /* Increment the stack pointer. */
10525 uint64_t uNewRsp;
10526 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10527
10528 /* Read the dword the lazy way. */
10529 uint32_t const *pu32Src;
10530 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10531 if (rc == VINF_SUCCESS)
10532 {
10533 *pu32Value = *pu32Src;
10534 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10535
10536 /* Commit the new RSP value. */
10537 if (rc == VINF_SUCCESS)
10538 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10539 }
10540
10541 return rc;
10542}
10543
10544
10545/**
10546 * Pops a qword from the stack.
10547 *
10548 * @returns Strict VBox status code.
10549 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10550 * @param pu64Value Where to store the popped value.
10551 */
10552IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPU pVCpu, uint64_t *pu64Value)
10553{
10554 /* Increment the stack pointer. */
10555 uint64_t uNewRsp;
10556 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10557
10558 /* Read the qword the lazy way. */
10559 uint64_t const *pu64Src;
10560 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10561 if (rc == VINF_SUCCESS)
10562 {
10563 *pu64Value = *pu64Src;
10564 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10565
10566 /* Commit the new RSP value. */
10567 if (rc == VINF_SUCCESS)
10568 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10569 }
10570
10571 return rc;
10572}
10573
10574
10575/**
10576 * Pushes a word onto the stack, using a temporary stack pointer.
10577 *
10578 * @returns Strict VBox status code.
10579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10580 * @param u16Value The value to push.
10581 * @param pTmpRsp Pointer to the temporary stack pointer.
10582 */
10583IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPU pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10584{
10585 /* Decrement the stack pointer. */
10586 RTUINT64U NewRsp = *pTmpRsp;
10587 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10588
10589 /* Write the word the lazy way. */
10590 uint16_t *pu16Dst;
10591 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10592 if (rc == VINF_SUCCESS)
10593 {
10594 *pu16Dst = u16Value;
10595 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10596 }
10597
10598 /* Commit the new RSP value unless an access handler made trouble. */
10599 if (rc == VINF_SUCCESS)
10600 *pTmpRsp = NewRsp;
10601
10602 return rc;
10603}
10604
10605
10606/**
10607 * Pushes a dword onto the stack, using a temporary stack pointer.
10608 *
10609 * @returns Strict VBox status code.
10610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10611 * @param u32Value The value to push.
10612 * @param pTmpRsp Pointer to the temporary stack pointer.
10613 */
10614IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPU pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10615{
10616 /* Decrement the stack pointer. */
10617 RTUINT64U NewRsp = *pTmpRsp;
10618 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10619
10620 /* Write the dword the lazy way. */
10621 uint32_t *pu32Dst;
10622 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10623 if (rc == VINF_SUCCESS)
10624 {
10625 *pu32Dst = u32Value;
10626 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10627 }
10628
10629 /* Commit the new RSP value unless an access handler made trouble. */
10630 if (rc == VINF_SUCCESS)
10631 *pTmpRsp = NewRsp;
10632
10633 return rc;
10634}
10635
10636
10637/**
10638 * Pushes a qword onto the stack, using a temporary stack pointer.
10639 *
10640 * @returns Strict VBox status code.
10641 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10642 * @param u64Value The value to push.
10643 * @param pTmpRsp Pointer to the temporary stack pointer.
10644 */
10645IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPU pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10646{
10647 /* Decrement the stack pointer. */
10648 RTUINT64U NewRsp = *pTmpRsp;
10649 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10650
10651 /* Write the qword the lazy way. */
10652 uint64_t *pu64Dst;
10653 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10654 if (rc == VINF_SUCCESS)
10655 {
10656 *pu64Dst = u64Value;
10657 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10658 }
10659
10660 /* Commit the new RSP value unless an access handler made trouble. */
10661 if (rc == VINF_SUCCESS)
10662 *pTmpRsp = NewRsp;
10663
10664 return rc;
10665}
10666
10667
10668/**
10669 * Pops a word from the stack, using a temporary stack pointer.
10670 *
10671 * @returns Strict VBox status code.
10672 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10673 * @param pu16Value Where to store the popped value.
10674 * @param pTmpRsp Pointer to the temporary stack pointer.
10675 */
10676IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPU pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10677{
10678 /* Increment the stack pointer. */
10679 RTUINT64U NewRsp = *pTmpRsp;
10680 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10681
10682 /* Read the word the lazy way. */
10683 uint16_t const *pu16Src;
10684 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10685 if (rc == VINF_SUCCESS)
10686 {
10687 *pu16Value = *pu16Src;
10688 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10689
10690 /* Commit the new RSP value. */
10691 if (rc == VINF_SUCCESS)
10692 *pTmpRsp = NewRsp;
10693 }
10694
10695 return rc;
10696}
10697
10698
10699/**
10700 * Pops a dword from the stack, using a temporary stack pointer.
10701 *
10702 * @returns Strict VBox status code.
10703 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10704 * @param pu32Value Where to store the popped value.
10705 * @param pTmpRsp Pointer to the temporary stack pointer.
10706 */
10707IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPU pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10708{
10709 /* Increment the stack pointer. */
10710 RTUINT64U NewRsp = *pTmpRsp;
10711 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10712
10713 /* Read the dword the lazy way. */
10714 uint32_t const *pu32Src;
10715 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10716 if (rc == VINF_SUCCESS)
10717 {
10718 *pu32Value = *pu32Src;
10719 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10720
10721 /* Commit the new RSP value. */
10722 if (rc == VINF_SUCCESS)
10723 *pTmpRsp = NewRsp;
10724 }
10725
10726 return rc;
10727}
10728
10729
10730/**
10731 * Pops a qword from the stack, using a temporary stack pointer.
10732 *
10733 * @returns Strict VBox status code.
10734 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10735 * @param pu64Value Where to store the popped value.
10736 * @param pTmpRsp Pointer to the temporary stack pointer.
10737 */
10738IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPU pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10739{
10740 /* Increment the stack pointer. */
10741 RTUINT64U NewRsp = *pTmpRsp;
10742 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10743
10744 /* Read the qword the lazy way. */
10745 uint64_t const *pu64Src;
10746 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10747 if (rcStrict == VINF_SUCCESS)
10748 {
10749 *pu64Value = *pu64Src;
10750 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10751
10752 /* Commit the new RSP value. */
10753 if (rcStrict == VINF_SUCCESS)
10754 *pTmpRsp = NewRsp;
10755 }
10756
10757 return rcStrict;
10758}
10759
10760
10761/**
10762 * Begin a special stack push (used by interrupts, exceptions and such).
10763 *
10764 * This will raise \#SS or \#PF if appropriate.
10765 *
10766 * @returns Strict VBox status code.
10767 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10768 * @param cbMem The number of bytes to push onto the stack.
10769 * @param ppvMem Where to return the pointer to the stack memory.
10770 * As with the other memory functions this could be
10771 * direct access or bounce buffered access, so
10772 * don't commit the register until the commit call
10773 * succeeds.
10774 * @param puNewRsp Where to return the new RSP value. This must be
10775 * passed unchanged to
10776 * iemMemStackPushCommitSpecial().
10777 */
10778IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPU pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10779{
10780 Assert(cbMem < UINT8_MAX);
10781 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10782 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10783}
10784
10785
10786/**
10787 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10788 *
10789 * This will update the rSP.
10790 *
10791 * @returns Strict VBox status code.
10792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10793 * @param pvMem The pointer returned by
10794 * iemMemStackPushBeginSpecial().
10795 * @param uNewRsp The new RSP value returned by
10796 * iemMemStackPushBeginSpecial().
10797 */
10798IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPU pVCpu, void *pvMem, uint64_t uNewRsp)
10799{
10800 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10801 if (rcStrict == VINF_SUCCESS)
10802 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10803 return rcStrict;
10804}
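
/*
 * Illustrative usage of the special push pair above (a hypothetical caller
 * sketch with made-up locals, not lifted from the actual exception code):
 *
 *      uint64_t     uNewRsp;
 *      uint16_t    *pu16Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pu16Frame[2] = uOldFlags;   // hypothetical locals
 *      pu16Frame[1] = uOldCs;
 *      pu16Frame[0] = uOldIp;
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 * As noted in the docs above, no registers should be committed before
 * iemMemStackPushCommitSpecial() succeeds, since the mapping may be bounce
 * buffered.
 */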
10805
10806
10807/**
10808 * Begin a special stack pop (used by iret, retf and such).
10809 *
10810 * This will raise \#SS or \#PF if appropriate.
10811 *
10812 * @returns Strict VBox status code.
10813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10814 * @param cbMem The number of bytes to pop from the stack.
10815 * @param ppvMem Where to return the pointer to the stack memory.
10816 * @param puNewRsp Where to return the new RSP value. This must be
10817 * assigned to CPUMCTX::rsp manually some time
10818 * after iemMemStackPopDoneSpecial() has been
10819 * called.
10820 */
10821IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10822{
10823 Assert(cbMem < UINT8_MAX);
10824 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10825 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10826}
10827
10828
10829/**
10830 * Continue a special stack pop (used by iret and retf).
10831 *
10832 * This will raise \#SS or \#PF if appropriate.
10833 *
10834 * @returns Strict VBox status code.
10835 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10836 * @param cbMem The number of bytes to pop from the stack.
10837 * @param ppvMem Where to return the pointer to the stack memory.
10838 * @param puNewRsp Where to return the new RSP value. This must be
10839 * assigned to CPUMCTX::rsp manually some time
10840 * after iemMemStackPopDoneSpecial() has been
10841 * called.
10842 */
10843IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPU pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10844{
10845 Assert(cbMem < UINT8_MAX);
10846 RTUINT64U NewRsp;
10847 NewRsp.u = *puNewRsp;
10848 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10849 *puNewRsp = NewRsp.u;
10850 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10851}
10852
10853
10854/**
10855 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10856 * iemMemStackPopContinueSpecial).
10857 *
10858 * The caller will manually commit the rSP.
10859 *
10860 * @returns Strict VBox status code.
10861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10862 * @param pvMem The pointer returned by
10863 * iemMemStackPopBeginSpecial() or
10864 * iemMemStackPopContinueSpecial().
10865 */
10866IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPU pVCpu, void const *pvMem)
10867{
10868 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10869}
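
/*
 * Illustrative usage of the special pop routines above (a hypothetical sketch
 * with made-up locals, not lifted from the actual iret/retf implementation):
 *
 *      uint64_t        uNewRsp;
 *      uint16_t const *pu16Frame;
 *      VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, 6, (void const **)&pu16Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint16_t const uNewIp    = pu16Frame[0];    // hypothetical locals
 *      uint16_t const uNewCs    = pu16Frame[1];
 *      uint16_t const uNewFlags = pu16Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu16Frame);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      // ... checks, loading of CS and so on ...
 *      pVCpu->cpum.GstCtx.rsp = uNewRsp;   // committed manually, as documented above
 */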
10870
10871
10872/**
10873 * Fetches a system table byte.
10874 *
10875 * @returns Strict VBox status code.
10876 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10877 * @param pbDst Where to return the byte.
10878 * @param iSegReg The index of the segment register to use for
10879 * this access. The base and limits are checked.
10880 * @param GCPtrMem The address of the guest memory.
10881 */
10882IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPU pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10883{
10884 /* The lazy approach for now... */
10885 uint8_t const *pbSrc;
10886 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10887 if (rc == VINF_SUCCESS)
10888 {
10889 *pbDst = *pbSrc;
10890 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10891 }
10892 return rc;
10893}
10894
10895
10896/**
10897 * Fetches a system table word.
10898 *
10899 * @returns Strict VBox status code.
10900 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10901 * @param pu16Dst Where to return the word.
10902 * @param iSegReg The index of the segment register to use for
10903 * this access. The base and limits are checked.
10904 * @param GCPtrMem The address of the guest memory.
10905 */
10906IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPU pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10907{
10908 /* The lazy approach for now... */
10909 uint16_t const *pu16Src;
10910 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10911 if (rc == VINF_SUCCESS)
10912 {
10913 *pu16Dst = *pu16Src;
10914 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10915 }
10916 return rc;
10917}
10918
10919
10920/**
10921 * Fetches a system table dword.
10922 *
10923 * @returns Strict VBox status code.
10924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10925 * @param pu32Dst Where to return the dword.
10926 * @param iSegReg The index of the segment register to use for
10927 * this access. The base and limits are checked.
10928 * @param GCPtrMem The address of the guest memory.
10929 */
10930IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPU pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10931{
10932 /* The lazy approach for now... */
10933 uint32_t const *pu32Src;
10934 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10935 if (rc == VINF_SUCCESS)
10936 {
10937 *pu32Dst = *pu32Src;
10938 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10939 }
10940 return rc;
10941}
10942
10943
10944/**
10945 * Fetches a system table qword.
10946 *
10947 * @returns Strict VBox status code.
10948 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10949 * @param pu64Dst Where to return the qword.
10950 * @param iSegReg The index of the segment register to use for
10951 * this access. The base and limits are checked.
10952 * @param GCPtrMem The address of the guest memory.
10953 */
10954IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPU pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10955{
10956 /* The lazy approach for now... */
10957 uint64_t const *pu64Src;
10958 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10959 if (rc == VINF_SUCCESS)
10960 {
10961 *pu64Dst = *pu64Src;
10962 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10963 }
10964 return rc;
10965}
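
/*
 * Note: the four iemMemFetchSysUxx helpers above map the memory as
 * IEM_ACCESS_SYS_R, i.e. as implicit supervisor reads used for descriptor and
 * other system table accesses, rather than as ordinary data reads.
 */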
10966
10967
10968/**
10969 * Fetches a descriptor table entry with caller specified error code.
10970 *
10971 * @returns Strict VBox status code.
10972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10973 * @param pDesc Where to return the descriptor table entry.
10974 * @param uSel The selector which table entry to fetch.
10975 * @param uXcpt The exception to raise on table lookup error.
10976 * @param uErrorCode The error code associated with the exception.
10977 */
10978IEM_STATIC VBOXSTRICTRC
10979iemMemFetchSelDescWithErr(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10980{
10981 AssertPtr(pDesc);
10982 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10983
10984 /** @todo did the 286 require all 8 bytes to be accessible? */
10985 /*
10986 * Get the selector table base and check bounds.
10987 */
10988 RTGCPTR GCPtrBase;
10989 if (uSel & X86_SEL_LDT)
10990 {
10991 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10992 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10993 {
10994 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10995 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10996 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10997 uErrorCode, 0);
10998 }
10999
11000 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
11001 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
11002 }
11003 else
11004 {
11005 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
11006 {
11007 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
11008 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
11009 uErrorCode, 0);
11010 }
11011 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
11012 }
11013
11014 /*
11015 * Read the legacy descriptor and maybe the long mode extensions if
11016 * required.
11017 */
11018 VBOXSTRICTRC rcStrict;
11019 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
11020 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
11021 else
11022 {
11023 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
11024 if (rcStrict == VINF_SUCCESS)
11025 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
11026 if (rcStrict == VINF_SUCCESS)
11027 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
11028 if (rcStrict == VINF_SUCCESS)
11029 pDesc->Legacy.au16[3] = 0;
11030 else
11031 return rcStrict;
11032 }
11033
11034 if (rcStrict == VINF_SUCCESS)
11035 {
11036 if ( !IEM_IS_LONG_MODE(pVCpu)
11037 || pDesc->Legacy.Gen.u1DescType)
11038 pDesc->Long.au64[1] = 0;
11039 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
11040 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
11041 else
11042 {
11043 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
11044 /** @todo is this the right exception? */
11045 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
11046 }
11047 }
11048 return rcStrict;
11049}
11050
11051
11052/**
11053 * Fetches a descriptor table entry.
11054 *
11055 * @returns Strict VBox status code.
11056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11057 * @param pDesc Where to return the descriptor table entry.
11058 * @param uSel The selector which table entry to fetch.
11059 * @param uXcpt The exception to raise on table lookup error.
11060 */
11061IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPU pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
11062{
11063 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
11064}
11065
11066
11067/**
11068 * Fakes a long mode stack selector for SS = 0.
11069 *
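 * In long mode the stack segment register may hold the NULL selector; this
 * helper supplies a writable, present, long-mode descriptor so callers can
 * treat SS = 0 like a normal stack segment.
 *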
11070 * @param pDescSs Where to return the fake stack descriptor.
11071 * @param uDpl The DPL we want.
11072 */
11073IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11074{
11075 pDescSs->Long.au64[0] = 0;
11076 pDescSs->Long.au64[1] = 0;
11077 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11078 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11079 pDescSs->Long.Gen.u2Dpl = uDpl;
11080 pDescSs->Long.Gen.u1Present = 1;
11081 pDescSs->Long.Gen.u1Long = 1;
11082}
11083
11084
11085/**
11086 * Marks the selector descriptor as accessed (only non-system descriptors).
11087 *
11088 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11089 * will therefore skip the limit checks.
11090 *
11091 * @returns Strict VBox status code.
11092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11093 * @param uSel The selector.
11094 */
11095IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPU pVCpu, uint16_t uSel)
11096{
11097 /*
11098 * Get the selector table base and calculate the entry address.
11099 */
11100 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11101 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11102 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11103 GCPtr += uSel & X86_SEL_MASK;
11104
11105 /*
11106 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11107 * ugly stuff to avoid this. This will make sure it's an atomic access
11108 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11109 */
11110 VBOXSTRICTRC rcStrict;
11111 uint32_t volatile *pu32;
11112 if ((GCPtr & 3) == 0)
11113 {
11114 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11115 GCPtr += 2 + 2;
11116 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11117 if (rcStrict != VINF_SUCCESS)
11118 return rcStrict;
11119 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11120 }
11121 else
11122 {
11123 /* The misaligned GDT/LDT case, map the whole thing. */
11124 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11125 if (rcStrict != VINF_SUCCESS)
11126 return rcStrict;
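        /* The accessed flag is bit 40 of the 8-byte descriptor.  Depending on
           how the host mapping happens to be aligned, advance the byte pointer
           to the next 4-byte boundary and reduce the bit index by the bits
           skipped, so ASMAtomicBitSet always gets an aligned base address. */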
11127 switch ((uintptr_t)pu32 & 3)
11128 {
11129 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11130 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11131 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11132 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11133 }
11134 }
11135
11136 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11137}
11138
11139/** @} */
11140
11141
11142/*
11143 * Include the C/C++ implementations of the instructions.
11144 */
11145#include "IEMAllCImpl.cpp.h"
11146
11147
11148
11149/** @name "Microcode" macros.
11150 *
11151 * The idea is that we should be able to use the same code both to interpret
11152 * instructions and, eventually, to recompile them.  Thus this obfuscation.
11153 *
11154 * @{
11155 */
11156#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11157#define IEM_MC_END() }
11158#define IEM_MC_PAUSE() do {} while (0)
11159#define IEM_MC_CONTINUE() do {} while (0)
11160
11161/** Internal macro. */
11162#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11163 do \
11164 { \
11165 VBOXSTRICTRC rcStrict2 = a_Expr; \
11166 if (rcStrict2 != VINF_SUCCESS) \
11167 return rcStrict2; \
11168 } while (0)
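
/*
 * Illustrative sketch of how an instruction body composes these macros in the
 * interpreter (a hypothetical fragment, not an actual decoder entry; the
 * register access macros used here are defined further down):
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Tmp);
 *      IEM_MC_FETCH_GREG_U16(u16Tmp, X86_GREG_xAX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xCX, u16Tmp);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * IEM_MC_BEGIN/IEM_MC_END expand to a plain statement block here, while macros
 * wrapping fallible operations bail out early via IEM_MC_RETURN_ON_FAILURE.
 */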
11169
11170
11171#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11172#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11173#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11174#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11175#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11176#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11177#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11178#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11179#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11180 do { \
11181 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11182 return iemRaiseDeviceNotAvailable(pVCpu); \
11183 } while (0)
11184#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11185 do { \
11186 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11187 return iemRaiseDeviceNotAvailable(pVCpu); \
11188 } while (0)
11189#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11190 do { \
11191 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11192 return iemRaiseMathFault(pVCpu); \
11193 } while (0)
11194#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11195 do { \
11196 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11197 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11198 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11199 return iemRaiseUndefinedOpcode(pVCpu); \
11200 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11201 return iemRaiseDeviceNotAvailable(pVCpu); \
11202 } while (0)
11203#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11204 do { \
11205 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11206 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11207 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11208 return iemRaiseUndefinedOpcode(pVCpu); \
11209 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11210 return iemRaiseDeviceNotAvailable(pVCpu); \
11211 } while (0)
11212#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11213 do { \
11214 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11215 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11216 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11217 return iemRaiseUndefinedOpcode(pVCpu); \
11218 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11219 return iemRaiseDeviceNotAvailable(pVCpu); \
11220 } while (0)
11221#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11222 do { \
11223 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11224 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11225 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11226 return iemRaiseUndefinedOpcode(pVCpu); \
11227 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11228 return iemRaiseDeviceNotAvailable(pVCpu); \
11229 } while (0)
11230#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11231 do { \
11232 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11233 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11234 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11235 return iemRaiseUndefinedOpcode(pVCpu); \
11236 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11237 return iemRaiseDeviceNotAvailable(pVCpu); \
11238 } while (0)
11239#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11240 do { \
11241 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11242 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11243 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11244 return iemRaiseUndefinedOpcode(pVCpu); \
11245 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11246 return iemRaiseDeviceNotAvailable(pVCpu); \
11247 } while (0)
11248#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11249 do { \
11250 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11251 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11252 return iemRaiseUndefinedOpcode(pVCpu); \
11253 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11254 return iemRaiseDeviceNotAvailable(pVCpu); \
11255 } while (0)
11256#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11257 do { \
11258 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11259 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11260 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11261 return iemRaiseUndefinedOpcode(pVCpu); \
11262 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11263 return iemRaiseDeviceNotAvailable(pVCpu); \
11264 } while (0)
11265#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11266 do { \
11267 if (pVCpu->iem.s.uCpl != 0) \
11268 return iemRaiseGeneralProtectionFault0(pVCpu); \
11269 } while (0)
11270#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11271 do { \
11272 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11273 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11274 } while (0)
11275#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11276 do { \
11277 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11278 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11279 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11280 return iemRaiseUndefinedOpcode(pVCpu); \
11281 } while (0)
11282#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11283 do { \
11284 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11285 return iemRaiseGeneralProtectionFault0(pVCpu); \
11286 } while (0)
11287
11288
11289#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11290#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11291#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11292#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11293#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11294#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11295#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11296 uint32_t a_Name; \
11297 uint32_t *a_pName = &a_Name
11298#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11299 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
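/* Note: X86_EFL_1 is the reserved, always-one bit 1 of EFLAGS; the assertion
   above catches callers handing in a value with that reserved bit cleared. */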
11300
11301#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11302#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11303
11304#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11305#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11306#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11307#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11308#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11309#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11310#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11311#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11312#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11313#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11314#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11315#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11316#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11317#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11318#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11319#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11320#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11321#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11322 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11323 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11324 } while (0)
11325#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11326 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11327 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11328 } while (0)
11329#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11330 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11331 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11332 } while (0)
11333/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11334#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11335 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11336 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11337 } while (0)
11338#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11339 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11340 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11341 } while (0)
11342/** @note Not for IOPL or IF testing or modification. */
11343#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11344#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11345#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11346#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11347
11348#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11349#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11350#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11351#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11352#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11353#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11354#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11355#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11356#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11357#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11358/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11359#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11360 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11361 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11362 } while (0)
11363#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11364 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11365 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11366 } while (0)
11367#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11368 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11369
11370
11371#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11372#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11373/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11374 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11375#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11376#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11377/** @note Not for IOPL or IF testing or modification. */
11378#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11379
11380#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11381#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11382#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11383 do { \
11384 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11385 *pu32Reg += (a_u32Value); \
11386 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11387 } while (0)
11388#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11389
11390#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11391#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11392#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11393 do { \
11394 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11395 *pu32Reg -= (a_u32Value); \
11396 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11397 } while (0)
11398#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11399#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11400
11401#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11402#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11403#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11404#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11405#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11406#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11407#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11408
11409#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11410#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11411#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11412#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11413
11414#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11415#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11416#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11417
11418#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11419#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11420#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11421
11422#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11423#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11424#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11425
11426#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11427#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11428#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11429
11430#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11431
11432#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11433
11434#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11435#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11436#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11437 do { \
11438 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11439 *pu32Reg &= (a_u32Value); \
11440 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11441 } while (0)
11442#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11443
11444#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11445#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11446#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11447 do { \
11448 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11449 *pu32Reg |= (a_u32Value); \
11450 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11451 } while (0)
11452#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11453
11454
11455/** @note Not for IOPL or IF modification. */
11456#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11457/** @note Not for IOPL or IF modification. */
11458#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11459/** @note Not for IOPL or IF modification. */
11460#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11461
11462#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11463
11464/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11465#define IEM_MC_FPU_TO_MMX_MODE() do { \
11466 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11467 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11468 } while (0)
11469
11470/** Switches the FPU state from MMX mode (FTW=0xffff). */
11471#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11472 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11473 } while (0)
11474
11475#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11476 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11477#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11478 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11479#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11480 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11481 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11482 } while (0)
11483#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11484 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11485 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11486 } while (0)
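/* Note: the MMX registers alias the low 64 bits of the x87 registers, and the
   two store macros above also set the exponent/sign word (au32[2]) to all
   ones, matching what real CPUs do when MMX instructions write a register. */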
11487#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11488 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11489#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11490 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11491#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11492 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11493
11494#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11495 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11496 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11497 } while (0)
11498#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11499 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11500#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11501 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11502#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11503 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11504#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11505 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11506 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11507 } while (0)
11508#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11509 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11510#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11511 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11512 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11513 } while (0)
11514#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11515 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11516#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11517 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11518 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11519 } while (0)
11520#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11521 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11522#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11523 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11524#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11525 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11526#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11527 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11528#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11529 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11530 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11531 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11532 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11533 } while (0)
11534
11535#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11536 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11537 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11538 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11539 } while (0)
11540#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11541 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11542 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11543 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11544 } while (0)
11545#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11546 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11547 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11548 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11549 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11550 } while (0)
11551#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11552 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11553 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11554 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11555 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11556 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11557 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11558 } while (0)
11559
11560#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11561#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11562 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11563 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11564 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11565 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11566 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11567 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11568 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11569 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11570 } while (0)
11571#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11572 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11573 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11574 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11575 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11576 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11577 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11578 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11579 } while (0)
11580#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11581 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11582 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11583 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11584 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11585 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11586 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11587 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11588 } while (0)
11589#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11590 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11591 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11592 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11593 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11594 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11595 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11596 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11597 } while (0)
11598
11599#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11600 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11601#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11602 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11603#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11604 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11605#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11606 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11607 uintptr_t const iYRegTmp = (a_iYReg); \
11608 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11609 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11610 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11611 } while (0)
11612
11613#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11614 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11615 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11616 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11617 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11618 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11619 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11620 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11621 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11622 } while (0)
11623#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11624 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11625 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11626 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11627 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11628 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11629 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11630 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11631 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11632 } while (0)
11633#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11634 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11635 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11636 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11637 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11638 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11639 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11640 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11641 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11642 } while (0)
11643
11644#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11645 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11646 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11647 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11648 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11649 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11650 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11651 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11652 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11653 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11654 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11655 } while (0)
11656#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11657 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11658 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11659 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11660 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11661 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11662 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11663 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11664 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11665 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11666 } while (0)
11667#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11668 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11669 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11670 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11671 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11672 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11673 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11674 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11675 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11676 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11677 } while (0)
11678#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11679 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11680 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11681 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11682 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11683 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11684 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11685 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11686 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11687 } while (0)
11688
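/*
 * Memory fetch macros.  Without IEM_WITH_SETJMP failures are propagated as
 * strict status codes via IEM_MC_RETURN_ON_FAILURE; with IEM_WITH_SETJMP the
 * *Jmp fetch workers report failures by longjmp'ing instead, so the macros
 * reduce to plain expressions.
 */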
11689#ifndef IEM_WITH_SETJMP
11690# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11691 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11692# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11693 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11694# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11696#else
11697# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11698 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11699# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11700 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11701# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11702 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11703#endif
11704
11705#ifndef IEM_WITH_SETJMP
11706# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11707 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11708# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11709 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11710# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11711 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11712#else
11713# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11714 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11715# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11716 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11717# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11718 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11719#endif
11720
11721#ifndef IEM_WITH_SETJMP
11722# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11723 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11724# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11725 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11726# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11727 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11728#else
11729# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11730 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11731# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11732 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11733# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11734 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11735#endif
11736
11737#ifdef SOME_UNUSED_FUNCTION
11738# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11739 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11740#endif
11741
11742#ifndef IEM_WITH_SETJMP
11743# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11744 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11745# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11746 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11747# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11748 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11749# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11750 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11751#else
11752# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11753 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11754# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11755 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11756# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11757 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11758# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11759 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11760#endif
11761
11762#ifndef IEM_WITH_SETJMP
11763# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11764 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11765# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11766 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11767# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11769#else
11770# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11771 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11772# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11773 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11774# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11775 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11776#endif
11777
11778#ifndef IEM_WITH_SETJMP
11779# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11780 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11781# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11782 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11783#else
11784# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11785 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11786# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11787 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11788#endif
11789
11790#ifndef IEM_WITH_SETJMP
11791# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11792 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11793# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11794 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11795#else
11796# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11797 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11798# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11799 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11800#endif
11801
11802
11803
11804#ifndef IEM_WITH_SETJMP
11805# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11806 do { \
11807 uint8_t u8Tmp; \
11808 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11809 (a_u16Dst) = u8Tmp; \
11810 } while (0)
11811# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11812 do { \
11813 uint8_t u8Tmp; \
11814 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11815 (a_u32Dst) = u8Tmp; \
11816 } while (0)
11817# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11818 do { \
11819 uint8_t u8Tmp; \
11820 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11821 (a_u64Dst) = u8Tmp; \
11822 } while (0)
11823# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11824 do { \
11825 uint16_t u16Tmp; \
11826 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11827 (a_u32Dst) = u16Tmp; \
11828 } while (0)
11829# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11830 do { \
11831 uint16_t u16Tmp; \
11832 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11833 (a_u64Dst) = u16Tmp; \
11834 } while (0)
11835# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11836 do { \
11837 uint32_t u32Tmp; \
11838 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11839 (a_u64Dst) = u32Tmp; \
11840 } while (0)
11841#else /* IEM_WITH_SETJMP */
11842# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11843 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11844# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11845 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11846# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11847 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11848# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11849 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11850# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11851 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11852# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11853 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11854#endif /* IEM_WITH_SETJMP */
11855
11856#ifndef IEM_WITH_SETJMP
11857# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11858 do { \
11859 uint8_t u8Tmp; \
11860 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11861 (a_u16Dst) = (int8_t)u8Tmp; \
11862 } while (0)
11863# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11864 do { \
11865 uint8_t u8Tmp; \
11866 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11867 (a_u32Dst) = (int8_t)u8Tmp; \
11868 } while (0)
11869# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11870 do { \
11871 uint8_t u8Tmp; \
11872 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11873 (a_u64Dst) = (int8_t)u8Tmp; \
11874 } while (0)
11875# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11876 do { \
11877 uint16_t u16Tmp; \
11878 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11879 (a_u32Dst) = (int16_t)u16Tmp; \
11880 } while (0)
11881# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11882 do { \
11883 uint16_t u16Tmp; \
11884 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11885 (a_u64Dst) = (int16_t)u16Tmp; \
11886 } while (0)
11887# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11888 do { \
11889 uint32_t u32Tmp; \
11890 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11891 (a_u64Dst) = (int32_t)u32Tmp; \
11892 } while (0)
11893#else /* IEM_WITH_SETJMP */
11894# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11895 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11896# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11897 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11898# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11899 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11900# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11901 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11902# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11903 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11904# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11905 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11906#endif /* IEM_WITH_SETJMP */
11907
11908#ifndef IEM_WITH_SETJMP
11909# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11910 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11911# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11912 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11913# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11914 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11915# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11916 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11917#else
11918# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11919 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11920# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11921 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11922# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11923 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11924# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11925 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11926#endif
11927
11928#ifndef IEM_WITH_SETJMP
11929# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11930 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11931# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11932 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11933# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11934 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11935# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11936 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11937#else
11938# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11939 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11940# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11941 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11942# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11943 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11944# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11945 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11946#endif
11947
11948#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11949#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11950#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11951#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11952#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11953#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11954#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11955 do { \
11956 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11957 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11958 } while (0)
11959
11960#ifndef IEM_WITH_SETJMP
11961# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11962 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11963# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11964 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11965#else
11966# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11967 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11968# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11969 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11970#endif
11971
11972#ifndef IEM_WITH_SETJMP
11973# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11974 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11975# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11976 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11977#else
11978# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11979 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11980# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11981 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11982#endif
11983
11984
11985#define IEM_MC_PUSH_U16(a_u16Value) \
11986 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11987#define IEM_MC_PUSH_U32(a_u32Value) \
11988 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11989#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11990 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11991#define IEM_MC_PUSH_U64(a_u64Value) \
11992 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11993
11994#define IEM_MC_POP_U16(a_pu16Value) \
11995 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11996#define IEM_MC_POP_U32(a_pu32Value) \
11997 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11998#define IEM_MC_POP_U64(a_pu64Value) \
11999 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
12000
12001/** Maps guest memory for direct or bounce buffered access.
12002 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12003 * @remarks May return.
12004 */
12005#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
12006 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12007
12008/** Maps guest memory for direct or bounce buffered access.
12009 * The purpose is to pass it to an operand implementation, thus the a_iArg.
12010 * @remarks May return.
12011 */
12012#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
12013 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
12014
12015/** Commits the memory and unmaps the guest memory.
12016 * @remarks May return.
12017 */
12018#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
12019 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
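/* Rough usage sketch (illustrative only, not from this file): a read-modify-write
 * byte operand is typically mapped, operated on by an assembly worker and then
 * committed.  Names like pfnU8Worker and iGRegSrc are placeholders.
 *
 *      IEM_MC_BEGIN(3, 2);
 *      IEM_MC_ARG(uint8_t *,       pu8Dst,          0);
 *      IEM_MC_ARG(uint8_t,         u8Src,           1);
 *      IEM_MC_ARG_LOCAL_EFLAGS(    pEFlags, EFlags, 2);
 *      IEM_MC_LOCAL(RTGCPTR,       GCPtrEffDst);
 *
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffDst, bRm, 0);
 *      IEM_MC_MEM_MAP(pu8Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *      IEM_MC_FETCH_GREG_U8(u8Src, iGRegSrc);
 *      IEM_MC_FETCH_EFLAGS(EFlags);
 *      IEM_MC_CALL_VOID_AIMPL_3(pfnU8Worker, pu8Dst, u8Src, pEFlags);
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu8Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_COMMIT_EFLAGS(EFlags);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */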
12020
12021/** Commits the memory and unmaps the guest memory unless the FPU status word
12022 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
12023 * that would cause the FPU store instruction not to store its value.
12024 *
12025 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
12026 * store, while \#P will not.
12027 *
12028 * @remarks May in theory return - for now.
12029 */
12030#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
12031 do { \
12032 if ( !(a_u16FSW & X86_FSW_ES) \
12033 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
12034 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
12035 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
12036 } while (0)
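/* Reading of the test above (derived from the macro itself): with a_u16FSW
 * having ES and IE set and an unmasked #I (FCW.IM clear) the commit is skipped,
 * whereas ES plus only PE still commits since #P is not in the UE|OE|IE set.
 * The test relies on the FSW exception flags and the FCW mask bits sharing the
 * same bit positions. */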
12037
12038/** Calculate efficient address from R/M. */
12039#ifndef IEM_WITH_SETJMP
12040# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12041 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
12042#else
12043# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
12044 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
12045#endif
12046
12047#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
12048#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
12049#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
12050#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
12051#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
12052#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
12053#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
12054
12055/**
12056 * Defers the rest of the instruction emulation to a C implementation routine
12057 * and returns, only taking the standard parameters.
12058 *
12059 * @param a_pfnCImpl The pointer to the C routine.
12060 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12061 */
12062#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12063
12064/**
12065 * Defers the rest of the instruction emulation to a C implementation routine and
12066 * returns, taking one argument in addition to the standard ones.
12067 *
12068 * @param a_pfnCImpl The pointer to the C routine.
12069 * @param a0 The argument.
12070 */
12071#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12072
12073/**
12074 * Defers the rest of the instruction emulation to a C implementation routine
12075 * and returns, taking two arguments in addition to the standard ones.
12076 *
12077 * @param a_pfnCImpl The pointer to the C routine.
12078 * @param a0 The first extra argument.
12079 * @param a1 The second extra argument.
12080 */
12081#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12082
12083/**
12084 * Defers the rest of the instruction emulation to a C implementation routine
12085 * and returns, taking three arguments in addition to the standard ones.
12086 *
12087 * @param a_pfnCImpl The pointer to the C routine.
12088 * @param a0 The first extra argument.
12089 * @param a1 The second extra argument.
12090 * @param a2 The third extra argument.
12091 */
12092#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12093
12094/**
12095 * Defers the rest of the instruction emulation to a C implementation routine
12096 * and returns, taking four arguments in addition to the standard ones.
12097 *
12098 * @param a_pfnCImpl The pointer to the C routine.
12099 * @param a0 The first extra argument.
12100 * @param a1 The second extra argument.
12101 * @param a2 The third extra argument.
12102 * @param a3 The fourth extra argument.
12103 */
12104#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12105
12106/**
12107 * Defers the rest of the instruction emulation to a C implementation routine
12108 * and returns, taking five arguments in addition to the standard ones.
12109 *
12110 * @param a_pfnCImpl The pointer to the C routine.
12111 * @param a0 The first extra argument.
12112 * @param a1 The second extra argument.
12113 * @param a2 The third extra argument.
12114 * @param a3 The fourth extra argument.
12115 * @param a4 The fifth extra argument.
12116 */
12117#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
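/* Rough usage sketch (illustrative only): the decoder prepares the extra
 * arguments and then hands the rest of the instruction over to the C worker;
 * the routine and register index names below are placeholders.
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG_CONST(uint8_t, iSRegArg, iSReg, 0);
 *      IEM_MC_ARG(uint16_t,      u16Value,        1);
 *      IEM_MC_FETCH_GREG_U16(u16Value, iGRegSrc);
 *      IEM_MC_CALL_CIMPL_2(iemCImpl_LoadSReg, iSRegArg, u16Value);
 *      IEM_MC_END();
 */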
12118
12119/**
12120 * Defers the entire instruction emulation to a C implementation routine and
12121 * returns, only taking the standard parameters.
12122 *
12123 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12124 *
12125 * @param a_pfnCImpl The pointer to the C routine.
12126 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12127 */
12128#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12129
12130/**
12131 * Defers the entire instruction emulation to a C implementation routine and
12132 * returns, taking one argument in addition to the standard ones.
12133 *
12134 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12135 *
12136 * @param a_pfnCImpl The pointer to the C routine.
12137 * @param a0 The argument.
12138 */
12139#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12140
12141/**
12142 * Defers the entire instruction emulation to a C implementation routine and
12143 * returns, taking two arguments in addition to the standard ones.
12144 *
12145 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12146 *
12147 * @param a_pfnCImpl The pointer to the C routine.
12148 * @param a0 The first extra argument.
12149 * @param a1 The second extra argument.
12150 */
12151#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12152
12153/**
12154 * Defers the entire instruction emulation to a C implementation routine and
12155 * returns, taking three arguments in addition to the standard ones.
12156 *
12157 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12158 *
12159 * @param a_pfnCImpl The pointer to the C routine.
12160 * @param a0 The first extra argument.
12161 * @param a1 The second extra argument.
12162 * @param a2 The third extra argument.
12163 */
12164#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
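/* Rough usage sketch (illustrative only): instructions emulated entirely in C
 * are dispatched straight from the opcode decoder, roughly like this (function
 * names follow the usual iemOp_/iemCImpl_ pattern):
 *
 *      FNIEMOP_DEF(iemOp_hlt)
 *      {
 *          IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *          return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
 *      }
 */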
12165
12166/**
12167 * Calls a FPU assembly implementation taking one visible argument.
12168 *
12169 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12170 * @param a0 The first extra argument.
12171 */
12172#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12173 do { \
12174 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12175 } while (0)
12176
12177/**
12178 * Calls a FPU assembly implementation taking two visible arguments.
12179 *
12180 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12181 * @param a0 The first extra argument.
12182 * @param a1 The second extra argument.
12183 */
12184#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12185 do { \
12186 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12187 } while (0)
12188
12189/**
12190 * Calls a FPU assembly implementation taking three visible arguments.
12191 *
12192 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12193 * @param a0 The first extra argument.
12194 * @param a1 The second extra argument.
12195 * @param a2 The third extra argument.
12196 */
12197#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12198 do { \
12199 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12200 } while (0)
12201
12202#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12203 do { \
12204 (a_FpuData).FSW = (a_FSW); \
12205 (a_FpuData).r80Result = *(a_pr80Value); \
12206 } while (0)
12207
12208/** Pushes FPU result onto the stack. */
12209#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12210 iemFpuPushResult(pVCpu, &a_FpuData)
12211/** Pushes FPU result onto the stack and sets the FPUDP. */
12212#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12213 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12214
12215/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12216#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12217 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12218
12219/** Stores FPU result in a stack register. */
12220#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12221 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12222/** Stores FPU result in a stack register and pops the stack. */
12223#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12224 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12225/** Stores FPU result in a stack register and sets the FPUDP. */
12226#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12227 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12228/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12229 * stack. */
12230#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12231 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
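/* Rough usage sketch (illustrative only; the raise-exception checks real
 * emitters do up front are omitted, and pfnAImpl/iStReg are placeholders):
 *
 *      IEM_MC_BEGIN(3, 1);
 *      IEM_MC_LOCAL(IEMFPURESULT,          FpuRes);
 *      IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value1,      1);
 *      IEM_MC_ARG(PCRTFLOAT80U,            pr80Value2,      2);
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, iStReg)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, pFpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW(0);
 *      IEM_MC_ENDIF();
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */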
12232
12233/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12234#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12235 iemFpuUpdateOpcodeAndIp(pVCpu)
12236/** Free a stack register (for FFREE and FFREEP). */
12237#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12238 iemFpuStackFree(pVCpu, a_iStReg)
12239/** Increment the FPU stack pointer. */
12240#define IEM_MC_FPU_STACK_INC_TOP() \
12241 iemFpuStackIncTop(pVCpu)
12242/** Decrement the FPU stack pointer. */
12243#define IEM_MC_FPU_STACK_DEC_TOP() \
12244 iemFpuStackDecTop(pVCpu)
12245
12246/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12247#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12248 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12249/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12250#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12251 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12252/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12253#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12254 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12255/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12256#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12257 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12258/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12259 * stack. */
12260#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12261 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12262/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12263#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12264 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12265
12266/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12267#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12268 iemFpuStackUnderflow(pVCpu, a_iStDst)
12269/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12270 * stack. */
12271#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12272 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12273/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12274 * FPUDS. */
12275#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12276 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12277/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12278 * FPUDS. Pops stack. */
12279#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12280 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12281/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12282 * stack twice. */
12283#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12284 iemFpuStackUnderflowThenPopPop(pVCpu)
12285/** Raises a FPU stack underflow exception for an instruction pushing a result
12286 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12287#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12288 iemFpuStackPushUnderflow(pVCpu)
12289/** Raises a FPU stack underflow exception for an instruction pushing a result
12290 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12291#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12292 iemFpuStackPushUnderflowTwo(pVCpu)
12293
12294/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12295 * FPUIP, FPUCS and FOP. */
12296#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12297 iemFpuStackPushOverflow(pVCpu)
12298/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12299 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12300#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12301 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12302/** Prepares for using the FPU state.
12303 * Ensures that we can use the host FPU in the current context (RC+R0).
12304 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12305#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12306/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12307#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12308/** Actualizes the guest FPU state so it can be accessed and modified. */
12309#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12310
12311/** Prepares for using the SSE state.
12312 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12313 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12314#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12315/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12316#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12317/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12318#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12319
12320/** Prepares for using the AVX state.
12321 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12322 * Ensures the guest AVX state in the CPUMCTX is up to date.
12323 * @note This will include the AVX512 state too when support for it is added
12324 * due to the zero extending feature of VEX instructions. */
12325#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12326/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12327#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12328/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12329#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12330
12331/**
12332 * Calls a MMX assembly implementation taking two visible arguments.
12333 *
12334 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12335 * @param a0 The first extra argument.
12336 * @param a1 The second extra argument.
12337 */
12338#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12339 do { \
12340 IEM_MC_PREPARE_FPU_USAGE(); \
12341 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12342 } while (0)
12343
12344/**
12345 * Calls a MMX assembly implementation taking three visible arguments.
12346 *
12347 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12348 * @param a0 The first extra argument.
12349 * @param a1 The second extra argument.
12350 * @param a2 The third extra argument.
12351 */
12352#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12353 do { \
12354 IEM_MC_PREPARE_FPU_USAGE(); \
12355 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12356 } while (0)
12357
12358
12359/**
12360 * Calls a SSE assembly implementation taking two visible arguments.
12361 *
12362 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12363 * @param a0 The first extra argument.
12364 * @param a1 The second extra argument.
12365 */
12366#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12367 do { \
12368 IEM_MC_PREPARE_SSE_USAGE(); \
12369 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12370 } while (0)
12371
12372/**
12373 * Calls a SSE assembly implementation taking three visible arguments.
12374 *
12375 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12376 * @param a0 The first extra argument.
12377 * @param a1 The second extra argument.
12378 * @param a2 The third extra argument.
12379 */
12380#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12381 do { \
12382 IEM_MC_PREPARE_SSE_USAGE(); \
12383 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12384 } while (0)
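/* Rough usage sketch (illustrative only; the MMX variants above follow the same
 * shape with MMX register references, and iXRegDst/iXRegSrc/pfnU128Worker are
 * placeholders):
 *
 *      IEM_MC_BEGIN(2, 0);
 *      IEM_MC_ARG(PRTUINT128U,  pDst, 0);
 *      IEM_MC_ARG(PCRTUINT128U, pSrc, 1);
 *      IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *      IEM_MC_PREPARE_SSE_USAGE();
 *      IEM_MC_REF_XREG_U128(pDst, iXRegDst);
 *      IEM_MC_REF_XREG_U128_CONST(pSrc, iXRegSrc);
 *      IEM_MC_CALL_SSE_AIMPL_2(pfnU128Worker, pDst, pSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */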
12385
12386
12387/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12388 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12389#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12390 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12391
12392/**
12393 * Calls a AVX assembly implementation taking two visible arguments.
12394 *
12395 * There is one implicit zero'th argument, a pointer to the extended state.
12396 *
12397 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12398 * @param a1 The first extra argument.
12399 * @param a2 The second extra argument.
12400 */
12401#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12402 do { \
12403 IEM_MC_PREPARE_AVX_USAGE(); \
12404 a_pfnAImpl(pXState, (a1), (a2)); \
12405 } while (0)
12406
12407/**
12408 * Calls a AVX assembly implementation taking three visible arguments.
12409 *
12410 * There is one implicit zero'th argument, a pointer to the extended state.
12411 *
12412 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12413 * @param a1 The first extra argument.
12414 * @param a2 The second extra argument.
12415 * @param a3 The third extra argument.
12416 */
12417#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12418 do { \
12419 IEM_MC_PREPARE_AVX_USAGE(); \
12420 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12421 } while (0)
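/* Rough usage sketch (illustrative only): IEM_MC_IMPLICIT_AVX_AIMPL_ARGS()
 * declares the hidden argument 0 (pXState) which IEM_MC_CALL_AVX_AIMPL_2/3
 * passes on, so only the visible arguments appear in the call; pfnU256Worker
 * is a placeholder.
 *
 *      IEM_MC_BEGIN(3, 0);
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG(PRTUINT256U,  puDst, 1);
 *      IEM_MC_ARG(PCRTUINT256U, puSrc, 2);
 *          (AVX exception checks and YMM register references go here)
 *      IEM_MC_CALL_AVX_AIMPL_2(pfnU256Worker, puDst, puSrc);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */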
12422
12423/** @note Not for IOPL or IF testing. */
12424#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12425/** @note Not for IOPL or IF testing. */
12426#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12427/** @note Not for IOPL or IF testing. */
12428#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12429/** @note Not for IOPL or IF testing. */
12430#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12431/** @note Not for IOPL or IF testing. */
12432#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12433 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12434 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12435/** @note Not for IOPL or IF testing. */
12436#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12437 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12438 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12439/** @note Not for IOPL or IF testing. */
12440#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12441 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12442 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12443 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12444/** @note Not for IOPL or IF testing. */
12445#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12446 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12447 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12448 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12449#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12450#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12451#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12452/** @note Not for IOPL or IF testing. */
12453#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12454 if ( pVCpu->cpum.GstCtx.cx != 0 \
12455 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12456/** @note Not for IOPL or IF testing. */
12457#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12458 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12459 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12460/** @note Not for IOPL or IF testing. */
12461#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12462 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12463 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12464/** @note Not for IOPL or IF testing. */
12465#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12466 if ( pVCpu->cpum.GstCtx.cx != 0 \
12467 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12468/** @note Not for IOPL or IF testing. */
12469#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12470 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12471 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12472/** @note Not for IOPL or IF testing. */
12473#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12474 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12475 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12476#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12477#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12478
12479#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12480 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12481#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12482 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12483#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12484 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12485#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12486 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12487#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12488 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12489#define IEM_MC_IF_FCW_IM() \
12490 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12491
12492#define IEM_MC_ELSE() } else {
12493#define IEM_MC_ENDIF() } do {} while (0)
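/* Rough usage sketch (illustrative only), modelled on the conditional jump
 * emitters; i8Imm would be the already decoded displacement:
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF) {
 *          IEM_MC_REL_JMP_S8(i8Imm);
 *      } IEM_MC_ELSE() {
 *          IEM_MC_ADVANCE_RIP();
 *      } IEM_MC_ENDIF();
 *      IEM_MC_END();
 */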
12494
12495/** @} */
12496
12497
12498/** @name Opcode Debug Helpers.
12499 * @{
12500 */
12501#ifdef VBOX_WITH_STATISTICS
12502# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12503#else
12504# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12505#endif
12506
12507#ifdef DEBUG
12508# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12509 do { \
12510 IEMOP_INC_STATS(a_Stats); \
12511 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12512 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12513 } while (0)
12514
12515# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12516 do { \
12517 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12518 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12519 (void)RT_CONCAT(OP_,a_Upper); \
12520 (void)(a_fDisHints); \
12521 (void)(a_fIemHints); \
12522 } while (0)
12523
12524# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12525 do { \
12526 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12527 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12528 (void)RT_CONCAT(OP_,a_Upper); \
12529 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12530 (void)(a_fDisHints); \
12531 (void)(a_fIemHints); \
12532 } while (0)
12533
12534# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12535 do { \
12536 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12537 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12538 (void)RT_CONCAT(OP_,a_Upper); \
12539 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12540 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12541 (void)(a_fDisHints); \
12542 (void)(a_fIemHints); \
12543 } while (0)
12544
12545# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12546 do { \
12547 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12548 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12549 (void)RT_CONCAT(OP_,a_Upper); \
12550 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12551 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12552 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12553 (void)(a_fDisHints); \
12554 (void)(a_fIemHints); \
12555 } while (0)
12556
12557# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12558 do { \
12559 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12560 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12561 (void)RT_CONCAT(OP_,a_Upper); \
12562 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12563 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12564 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12565 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12566 (void)(a_fDisHints); \
12567 (void)(a_fIemHints); \
12568 } while (0)
12569
12570#else
12571# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12572
12573# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12574 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12575# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12576 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12577# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12578 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12579# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12580 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12581# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12582 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12583
12584#endif
12585
12586#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12587 IEMOP_MNEMONIC0EX(a_Lower, \
12588 #a_Lower, \
12589 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12590#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12591 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12592 #a_Lower " " #a_Op1, \
12593 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12594#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12595 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12596 #a_Lower " " #a_Op1 "," #a_Op2, \
12597 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12598#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12599 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12600 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12601 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12602#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12603 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12604 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12605 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
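/* Rough usage sketch (illustrative only; the statistics name, form and hint
 * values are examples, not taken from the real decoder tables):
 *
 *      FNIEMOP_DEF(iemOp_movups_Vps_Wps)
 *      {
 *          IEMOP_MNEMONIC2(RM, MOVUPS, movups, Vps, Wps, DISOPTYPE_HARMLESS, 0);
 *          ...
 *      }
 */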
12606
12607/** @} */
12608
12609
12610/** @name Opcode Helpers.
12611 * @{
12612 */
12613
12614#ifdef IN_RING3
12615# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12616 do { \
12617 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12618 else \
12619 { \
12620 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12621 return IEMOP_RAISE_INVALID_OPCODE(); \
12622 } \
12623 } while (0)
12624#else
12625# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12626 do { \
12627 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12628 else return IEMOP_RAISE_INVALID_OPCODE(); \
12629 } while (0)
12630#endif
12631
12632/** The instruction requires a 186 or later. */
12633#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12634# define IEMOP_HLP_MIN_186() do { } while (0)
12635#else
12636# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12637#endif
12638
12639/** The instruction requires a 286 or later. */
12640#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12641# define IEMOP_HLP_MIN_286() do { } while (0)
12642#else
12643# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12644#endif
12645
12646/** The instruction requires a 386 or later. */
12647#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12648# define IEMOP_HLP_MIN_386() do { } while (0)
12649#else
12650# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12651#endif
12652
12653/** The instruction requires a 386 or later if the given expression is true. */
12654#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12655# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12656#else
12657# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12658#endif
12659
12660/** The instruction requires a 486 or later. */
12661#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12662# define IEMOP_HLP_MIN_486() do { } while (0)
12663#else
12664# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12665#endif
12666
12667/** The instruction requires a Pentium (586) or later. */
12668#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12669# define IEMOP_HLP_MIN_586() do { } while (0)
12670#else
12671# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12672#endif
12673
12674/** The instruction requires a PentiumPro (686) or later. */
12675#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12676# define IEMOP_HLP_MIN_686() do { } while (0)
12677#else
12678# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12679#endif
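/* Rough usage sketch (illustrative only; names are placeholders): an opcode
 * decoder gates itself on the target CPU right after announcing the mnemonic,
 * e.g. a 486+ instruction would do:
 *
 *      IEMOP_MNEMONIC(xyz, "xyz");
 *      IEMOP_HLP_MIN_486();
 */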
12680
12681
12682/** The instruction raises an \#UD in real and V8086 mode. */
12683#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12684 do \
12685 { \
12686 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12687 else return IEMOP_RAISE_INVALID_OPCODE(); \
12688 } while (0)
12689
12690#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12691/** The instruction raises an \#UD in real and V8086 mode, or, when in long mode,
12692 * if not using a 64-bit code segment (applicable to all VMX instructions
12693 * except VMCALL).
12694 */
12695#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12696 do \
12697 { \
12698 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12699 && ( !IEM_IS_LONG_MODE(pVCpu) \
12700 || IEM_IS_64BIT_CODE(pVCpu))) \
12701 { /* likely */ } \
12702 else \
12703 { \
12704 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12705 { \
12706 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12707 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12708 return IEMOP_RAISE_INVALID_OPCODE(); \
12709 } \
12710 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12711 { \
12712 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12713 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12714 return IEMOP_RAISE_INVALID_OPCODE(); \
12715 } \
12716 } \
12717 } while (0)
12718
12719/** The instruction can only be executed in VMX operation (VMX root mode and
12720 * non-root mode).
12721 *
12722 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12723 */
12724# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12725 do \
12726 { \
12727 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12728 else \
12729 { \
12730 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12731 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12732 return IEMOP_RAISE_INVALID_OPCODE(); \
12733 } \
12734 } while (0)
12735#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
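/* Rough usage sketch (illustrative only; the diagnostic enum prefix below is
 * an example of the a_InsDiagPrefix argument, not necessarily the real one):
 *
 *      IEMOP_HLP_VMX_INSTR("vmxoff", kVmxVDiag_Vmxoff);
 *      IEMOP_HLP_IN_VMX_OPERATION("vmxoff", kVmxVDiag_Vmxoff);
 */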
12736
12737/** The instruction is not available in 64-bit mode; throws \#UD if we're in
12738 * 64-bit mode. */
12739#define IEMOP_HLP_NO_64BIT() \
12740 do \
12741 { \
12742 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12743 return IEMOP_RAISE_INVALID_OPCODE(); \
12744 } while (0)
12745
12746/** The instruction is only available in 64-bit mode; throws \#UD if we're not in
12747 * 64-bit mode. */
12748#define IEMOP_HLP_ONLY_64BIT() \
12749 do \
12750 { \
12751 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12752 return IEMOP_RAISE_INVALID_OPCODE(); \
12753 } while (0)
12754
12755/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12756#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12757 do \
12758 { \
12759 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12760 iemRecalEffOpSize64Default(pVCpu); \
12761 } while (0)
12762
12763/** The instruction has 64-bit operand size if 64-bit mode. */
12764#define IEMOP_HLP_64BIT_OP_SIZE() \
12765 do \
12766 { \
12767 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12768 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12769 } while (0)
12770
12771/** Only a REX prefix immediately preceding the first opcode byte takes
12772 * effect. This macro helps ensure this, and it logs bad guest code. */
12773#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12774 do \
12775 { \
12776 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12777 { \
12778 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12779 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12780 pVCpu->iem.s.uRexB = 0; \
12781 pVCpu->iem.s.uRexIndex = 0; \
12782 pVCpu->iem.s.uRexReg = 0; \
12783 iemRecalEffOpSize(pVCpu); \
12784 } \
12785 } while (0)
12786
12787/**
12788 * Done decoding.
12789 */
12790#define IEMOP_HLP_DONE_DECODING() \
12791 do \
12792 { \
12793 /*nothing for now, maybe later... */ \
12794 } while (0)
12795
12796/**
12797 * Done decoding, raise \#UD exception if lock prefix present.
12798 */
12799#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12800 do \
12801 { \
12802 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12803 { /* likely */ } \
12804 else \
12805 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12806 } while (0)
12807
12808
12809/**
12810 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12811 * repnz or size prefixes are present, or if in real or v8086 mode.
12812 */
12813#define IEMOP_HLP_DONE_VEX_DECODING() \
12814 do \
12815 { \
12816 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12817 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12818 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12819 { /* likely */ } \
12820 else \
12821 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12822 } while (0)
12823
12824/**
12825 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12826 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L isn't 0.
12827 */
12828#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12829 do \
12830 { \
12831 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12832 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12833 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12834 && pVCpu->iem.s.uVexLength == 0)) \
12835 { /* likely */ } \
12836 else \
12837 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12838 } while (0)
12839
12840
12841/**
12842 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12843 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12844 * register 0, or if in real or v8086 mode.
12845 */
12846#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12847 do \
12848 { \
12849 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12850 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12851 && !pVCpu->iem.s.uVex3rdReg \
12852 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12853 { /* likely */ } \
12854 else \
12855 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12856 } while (0)
12857
12858/**
12859 * Done decoding VEX, no V, L=0.
12860 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12861 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12862 */
12863#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12864 do \
12865 { \
12866 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12867 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12868 && pVCpu->iem.s.uVexLength == 0 \
12869 && pVCpu->iem.s.uVex3rdReg == 0 \
12870 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12871 { /* likely */ } \
12872 else \
12873 return IEMOP_RAISE_INVALID_OPCODE(); \
12874 } while (0)
12875
12876#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12877 do \
12878 { \
12879 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12880 { /* likely */ } \
12881 else \
12882 { \
12883 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12884 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12885 } \
12886 } while (0)
12887#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12888 do \
12889 { \
12890 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12891 { /* likely */ } \
12892 else \
12893 { \
12894 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12895 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12896 } \
12897 } while (0)
12898
12899/**
12900 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12901 * are present.
12902 */
12903#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12904 do \
12905 { \
12906 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12907 { /* likely */ } \
12908 else \
12909 return IEMOP_RAISE_INVALID_OPCODE(); \
12910 } while (0)
12911
12912/**
12913 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12914 * prefixes are present.
12915 */
12916#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12917 do \
12918 { \
12919 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12920 { /* likely */ } \
12921 else \
12922 return IEMOP_RAISE_INVALID_OPCODE(); \
12923 } while (0)
12924
12925
12926/**
12927 * Calculates the effective address of a ModR/M memory operand.
12928 *
12929 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12930 *
12931 * @return Strict VBox status code.
12932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12933 * @param bRm The ModRM byte.
12934 * @param cbImm The size of any immediate following the
12935 * effective address opcode bytes. Important for
12936 * RIP relative addressing.
12937 * @param pGCPtrEff Where to return the effective address.
12938 */
12939IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12940{
12941 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12942# define SET_SS_DEF() \
12943 do \
12944 { \
12945 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12946 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12947 } while (0)
12948
12949 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12950 {
12951/** @todo Check the effective address size crap! */
12952 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12953 {
12954 uint16_t u16EffAddr;
12955
12956 /* Handle the disp16 form with no registers first. */
12957 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12958 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12959 else
12960 {
12961                /* Get the displacement. */
12962 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12963 {
12964 case 0: u16EffAddr = 0; break;
12965 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12966 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12967 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12968 }
12969
12970 /* Add the base and index registers to the disp. */
12971 switch (bRm & X86_MODRM_RM_MASK)
12972 {
12973 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12974 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12975 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12976 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12977 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12978 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12979 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12980 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12981 }
12982 }
12983
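            /* Worked example (illustrative): bRm=0x46 gives mod=1, rm=6, i.e. BP+disp8.
               With disp8=0x10 and BP=0x1234 the result is 0x1244, and SET_SS_DEF()
               makes SS the default segment. */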
12984 *pGCPtrEff = u16EffAddr;
12985 }
12986 else
12987 {
12988 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12989 uint32_t u32EffAddr;
12990
12991 /* Handle the disp32 form with no registers first. */
12992 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12993 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12994 else
12995 {
12996 /* Get the register (or SIB) value. */
12997 switch ((bRm & X86_MODRM_RM_MASK))
12998 {
12999 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13000 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13001 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13002 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13003 case 4: /* SIB */
13004 {
13005 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13006
13007 /* Get the index and scale it. */
13008 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13009 {
13010 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13011 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13012 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13013 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13014 case 4: u32EffAddr = 0; /*none */ break;
13015 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13016 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13017 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13018 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13019 }
13020 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13021
13022 /* add base */
13023 switch (bSib & X86_SIB_BASE_MASK)
13024 {
13025 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13026 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13027 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13028 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13029 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13030 case 5:
13031 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13032 {
13033 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13034 SET_SS_DEF();
13035 }
13036 else
13037 {
13038 uint32_t u32Disp;
13039 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13040 u32EffAddr += u32Disp;
13041 }
13042 break;
13043 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13044 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13045 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13046 }
13047 break;
13048 }
13049 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13050 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13051 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13052 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13053 }
13054
13055 /* Get and add the displacement. */
13056 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13057 {
13058 case 0:
13059 break;
13060 case 1:
13061 {
13062 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13063 u32EffAddr += i8Disp;
13064 break;
13065 }
13066 case 2:
13067 {
13068 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13069 u32EffAddr += u32Disp;
13070 break;
13071 }
13072 default:
13073 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13074 }
13075
13076 }
13077 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13078 *pGCPtrEff = u32EffAddr;
13079 else
13080 {
13081 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13082 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13083 }
13084 }
13085 }
13086 else
13087 {
13088 uint64_t u64EffAddr;
13089
13090 /* Handle the rip+disp32 form with no registers first. */
13091 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13092 {
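            /* RIP relative addressing is relative to the end of the instruction,
               so the length decoded so far plus the size of any trailing
               immediate (cbImm) is added to the displacement. */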
13093 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13094 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13095 }
13096 else
13097 {
13098 /* Get the register (or SIB) value. */
13099 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13100 {
13101 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13102 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13103 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13104 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13105 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13106 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13107 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13108 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13109 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13110 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13111 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13112 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13113 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13114 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13115 /* SIB */
13116 case 4:
13117 case 12:
13118 {
13119 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13120
13121 /* Get the index and scale it. */
13122 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13123 {
13124 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13125 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13126 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13127 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13128 case 4: u64EffAddr = 0; /*none */ break;
13129 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13130 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13131 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13132 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13133 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13134 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13135 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13136 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13137 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13138 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13139 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13140 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13141 }
13142 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13143
13144 /* add base */
13145 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13146 {
13147 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13148 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13149 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13150 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13151 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13152 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13153 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13154 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13155 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13156 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13157 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13158 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13159 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13160 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13161 /* complicated encodings */
13162 case 5:
13163 case 13:
13164 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13165 {
13166 if (!pVCpu->iem.s.uRexB)
13167 {
13168 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13169 SET_SS_DEF();
13170 }
13171 else
13172 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13173 }
13174 else
13175 {
13176 uint32_t u32Disp;
13177 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13178 u64EffAddr += (int32_t)u32Disp;
13179 }
13180 break;
13181 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13182 }
13183 break;
13184 }
13185 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13186 }
13187
13188 /* Get and add the displacement. */
13189 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13190 {
13191 case 0:
13192 break;
13193 case 1:
13194 {
13195 int8_t i8Disp;
13196 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13197 u64EffAddr += i8Disp;
13198 break;
13199 }
13200 case 2:
13201 {
13202 uint32_t u32Disp;
13203 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13204 u64EffAddr += (int32_t)u32Disp;
13205 break;
13206 }
13207 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13208 }
13209
13210 }
13211
13212 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13213 *pGCPtrEff = u64EffAddr;
13214 else
13215 {
13216 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13217 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13218 }
13219 }
13220
13221 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13222 return VINF_SUCCESS;
13223}
13224
13225
13226/**
13227 * Calculates the effective address of a ModR/M memory operand.
13228 *
13229 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13230 *
13231 * @return Strict VBox status code.
13232 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13233 * @param bRm The ModRM byte.
13234 * @param cbImm The size of any immediate following the
13235 * effective address opcode bytes. Important for
13236 * RIP relative addressing.
13237 * @param pGCPtrEff Where to return the effective address.
13238 * @param offRsp RSP displacement.
13239 */
13240IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13241{
13242    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13243# define SET_SS_DEF() \
13244 do \
13245 { \
13246 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13247 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13248 } while (0)
13249
13250 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13251 {
13252/** @todo Check the effective address size crap! */
13253 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13254 {
13255 uint16_t u16EffAddr;
13256
13257 /* Handle the disp16 form with no registers first. */
13258 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13259 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13260 else
13261 {
13262                /* Get the displacement. */
13263 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13264 {
13265 case 0: u16EffAddr = 0; break;
13266 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13267 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13268 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13269 }
13270
13271 /* Add the base and index registers to the disp. */
13272 switch (bRm & X86_MODRM_RM_MASK)
13273 {
13274 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13275 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13276 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13277 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13278 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13279 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13280 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13281 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13282 }
13283 }
13284
13285 *pGCPtrEff = u16EffAddr;
13286 }
13287 else
13288 {
13289 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13290 uint32_t u32EffAddr;
13291
13292 /* Handle the disp32 form with no registers first. */
13293 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13294 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13295 else
13296 {
13297 /* Get the register (or SIB) value. */
13298 switch ((bRm & X86_MODRM_RM_MASK))
13299 {
13300 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13301 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13302 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13303 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13304 case 4: /* SIB */
13305 {
13306 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13307
13308 /* Get the index and scale it. */
13309 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13310 {
13311 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13312 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13313 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13314 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13315 case 4: u32EffAddr = 0; /*none */ break;
13316 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13317 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13318 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13319 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13320 }
13321 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13322
13323 /* add base */
13324 switch (bSib & X86_SIB_BASE_MASK)
13325 {
13326 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13327 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13328 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13329 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13330 case 4:
13331 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13332 SET_SS_DEF();
13333 break;
13334 case 5:
13335 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13336 {
13337 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13338 SET_SS_DEF();
13339 }
13340 else
13341 {
13342 uint32_t u32Disp;
13343 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13344 u32EffAddr += u32Disp;
13345 }
13346 break;
13347 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13348 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13349 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13350 }
13351 break;
13352 }
13353 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13354 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13355 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13356 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13357 }
13358
13359 /* Get and add the displacement. */
13360 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13361 {
13362 case 0:
13363 break;
13364 case 1:
13365 {
13366 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13367 u32EffAddr += i8Disp;
13368 break;
13369 }
13370 case 2:
13371 {
13372 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13373 u32EffAddr += u32Disp;
13374 break;
13375 }
13376 default:
13377 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13378 }
13379
13380 }
13381 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13382 *pGCPtrEff = u32EffAddr;
13383 else
13384 {
13385 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13386 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13387 }
13388 }
13389 }
13390 else
13391 {
13392 uint64_t u64EffAddr;
13393
13394 /* Handle the rip+disp32 form with no registers first. */
13395 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13396 {
13397 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13398 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13399 }
13400 else
13401 {
13402 /* Get the register (or SIB) value. */
13403 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13404 {
13405 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13406 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13407 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13408 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13409 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13410 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13411 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13412 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13413 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13414 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13415 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13416 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13417 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13418 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13419 /* SIB */
13420 case 4:
13421 case 12:
13422 {
13423 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13424
13425 /* Get the index and scale it. */
13426 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13427 {
13428 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13429 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13430 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13431 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13432 case 4: u64EffAddr = 0; /*none */ break;
13433 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13434 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13435 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13436 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13437 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13438 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13439 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13440 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13441 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13442 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13443 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13444 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13445 }
13446 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13447
13448 /* add base */
13449 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13450 {
13451 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13452 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13453 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13454 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13455 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13456 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13457 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13458 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13459 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13460 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13461 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13462 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13463 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13464 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13465 /* complicated encodings */
13466 case 5:
13467 case 13:
13468 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13469 {
13470 if (!pVCpu->iem.s.uRexB)
13471 {
13472 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13473 SET_SS_DEF();
13474 }
13475 else
13476 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13477 }
13478 else
13479 {
13480 uint32_t u32Disp;
13481 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13482 u64EffAddr += (int32_t)u32Disp;
13483 }
13484 break;
13485 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13486 }
13487 break;
13488 }
13489 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13490 }
13491
13492 /* Get and add the displacement. */
13493 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13494 {
13495 case 0:
13496 break;
13497 case 1:
13498 {
13499 int8_t i8Disp;
13500 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13501 u64EffAddr += i8Disp;
13502 break;
13503 }
13504 case 2:
13505 {
13506 uint32_t u32Disp;
13507 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13508 u64EffAddr += (int32_t)u32Disp;
13509 break;
13510 }
13511 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13512 }
13513
13514 }
13515
13516 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13517 *pGCPtrEff = u64EffAddr;
13518 else
13519 {
13520 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13521 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13522 }
13523 }
13524
13525    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13526 return VINF_SUCCESS;
13527}
13528
13529
13530#ifdef IEM_WITH_SETJMP
13531/**
13532 * Calculates the effective address of a ModR/M memory operand.
13533 *
13534 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13535 *
13536 * May longjmp on internal error.
13537 *
13538 * @return The effective address.
13539 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13540 * @param bRm The ModRM byte.
13541 * @param cbImm The size of any immediate following the
13542 * effective address opcode bytes. Important for
13543 * RIP relative addressing.
13544 */
13545IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPU pVCpu, uint8_t bRm, uint8_t cbImm)
13546{
13547 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13548# define SET_SS_DEF() \
13549 do \
13550 { \
13551 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13552 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13553 } while (0)
13554
13555 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13556 {
13557/** @todo Check the effective address size crap! */
13558 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13559 {
13560 uint16_t u16EffAddr;
13561
13562 /* Handle the disp16 form with no registers first. */
13563 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13564 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13565 else
13566 {
13567                /* Get the displacement. */
13568 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13569 {
13570 case 0: u16EffAddr = 0; break;
13571 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13572 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13573 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13574 }
13575
13576 /* Add the base and index registers to the disp. */
13577 switch (bRm & X86_MODRM_RM_MASK)
13578 {
13579 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13580 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13581 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13582 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13583 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13584 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13585 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13586 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13587 }
13588 }
13589
13590 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13591 return u16EffAddr;
13592 }
13593
13594 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13595 uint32_t u32EffAddr;
13596
13597 /* Handle the disp32 form with no registers first. */
13598 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13599 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13600 else
13601 {
13602 /* Get the register (or SIB) value. */
13603 switch ((bRm & X86_MODRM_RM_MASK))
13604 {
13605 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13606 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13607 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13608 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13609 case 4: /* SIB */
13610 {
13611 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13612
13613 /* Get the index and scale it. */
13614 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13615 {
13616 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13617 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13618 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13619 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13620 case 4: u32EffAddr = 0; /*none */ break;
13621 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13622 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13623 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13624 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13625 }
13626 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13627
13628 /* add base */
13629 switch (bSib & X86_SIB_BASE_MASK)
13630 {
13631 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13632 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13633 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13634 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13635 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13636 case 5:
13637 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13638 {
13639 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13640 SET_SS_DEF();
13641 }
13642 else
13643 {
13644 uint32_t u32Disp;
13645 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13646 u32EffAddr += u32Disp;
13647 }
13648 break;
13649 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13650 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13651 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13652 }
13653 break;
13654 }
13655 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13656 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13657 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13658 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13659 }
13660
13661 /* Get and add the displacement. */
13662 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13663 {
13664 case 0:
13665 break;
13666 case 1:
13667 {
13668 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13669 u32EffAddr += i8Disp;
13670 break;
13671 }
13672 case 2:
13673 {
13674 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13675 u32EffAddr += u32Disp;
13676 break;
13677 }
13678 default:
13679 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13680 }
13681 }
13682
13683 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13684 {
13685 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13686 return u32EffAddr;
13687 }
13688 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13689 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13690 return u32EffAddr & UINT16_MAX;
13691 }
13692
13693 uint64_t u64EffAddr;
13694
13695 /* Handle the rip+disp32 form with no registers first. */
13696 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13697 {
13698 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13699 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13700 }
13701 else
13702 {
13703 /* Get the register (or SIB) value. */
13704 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13705 {
13706 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13707 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13708 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13709 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13710 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13711 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13712 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13713 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13714 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13715 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13716 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13717 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13718 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13719 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13720 /* SIB */
13721 case 4:
13722 case 12:
13723 {
13724 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13725
13726 /* Get the index and scale it. */
13727 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13728 {
13729 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13730 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13731 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13732 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13733 case 4: u64EffAddr = 0; /*none */ break;
13734 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13735 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13736 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13737 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13738 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13739 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13740 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13741 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13742 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13743 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13744 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13745 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13746 }
13747 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13748
13749 /* add base */
13750 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13751 {
13752 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13753 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13754 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13755 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13756 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13757 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13758 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13759 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13760 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13761 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13762 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13763 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13764 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13765 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13766 /* complicated encodings */
13767 case 5:
13768 case 13:
13769 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13770 {
13771 if (!pVCpu->iem.s.uRexB)
13772 {
13773 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13774 SET_SS_DEF();
13775 }
13776 else
13777 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13778 }
13779 else
13780 {
13781 uint32_t u32Disp;
13782 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13783 u64EffAddr += (int32_t)u32Disp;
13784 }
13785 break;
13786 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13787 }
13788 break;
13789 }
13790 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13791 }
13792
13793 /* Get and add the displacement. */
13794 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13795 {
13796 case 0:
13797 break;
13798 case 1:
13799 {
13800 int8_t i8Disp;
13801 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13802 u64EffAddr += i8Disp;
13803 break;
13804 }
13805 case 2:
13806 {
13807 uint32_t u32Disp;
13808 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13809 u64EffAddr += (int32_t)u32Disp;
13810 break;
13811 }
13812 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13813 }
13814
13815 }
13816
13817 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13818 {
13819 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13820 return u64EffAddr;
13821 }
13822 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13823 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13824 return u64EffAddr & UINT32_MAX;
13825}
13826#endif /* IEM_WITH_SETJMP */
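/*
 * Worked example (illustrative only, with hypothetical register values): in
 * 32-bit mode a ModR/M byte of 0x44 (mod=01, rm=100) tells the helpers above
 * that a SIB byte and a disp8 follow.  With a SIB byte of 0x48 (scale=01,
 * index=001=ECX, base=000=EAX) and a disp8 of 0x10, the calculation becomes
 *
 *      GCPtrEff = EAX + (ECX << 1) + 0x10
 *
 * and since neither EBP nor ESP is used as the base, SET_SS_DEF() is never
 * invoked and the default segment stays DS.  For instance, with
 * EAX=0x00001000 and ECX=0x00000020 this yields 0x1000 + 0x40 + 0x10 = 0x1050.
 */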
13827
13828/** @} */
13829
13830
13831
13832/*
13833 * Include the instructions
13834 */
13835#include "IEMAllInstructions.cpp.h"
13836
13837
13838
13839#ifdef LOG_ENABLED
13840/**
13841 * Logs the current instruction.
13842 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13843 * @param fSameCtx Set if we have the same context information as the VMM,
13844 * clear if we may have already executed an instruction in
13845 * our debug context. When clear, we assume IEMCPU holds
13846 * valid CPU mode info.
13847 *
13848 * The @a fSameCtx parameter is now misleading and obsolete.
13849 * @param pszFunction The IEM function doing the execution.
13850 */
13851IEM_STATIC void iemLogCurInstr(PVMCPU pVCpu, bool fSameCtx, const char *pszFunction)
13852{
13853# ifdef IN_RING3
13854 if (LogIs2Enabled())
13855 {
13856 char szInstr[256];
13857 uint32_t cbInstr = 0;
13858 if (fSameCtx)
13859 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13860 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13861 szInstr, sizeof(szInstr), &cbInstr);
13862 else
13863 {
13864 uint32_t fFlags = 0;
13865 switch (pVCpu->iem.s.enmCpuMode)
13866 {
13867 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13868 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13869 case IEMMODE_16BIT:
13870 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13871 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13872 else
13873 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13874 break;
13875 }
13876 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13877 szInstr, sizeof(szInstr), &cbInstr);
13878 }
13879
13880 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13881 Log2(("**** %s\n"
13882 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13883 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13884 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13885 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13886 " %s\n"
13887 , pszFunction,
13888 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13889 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13890 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13891 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13892 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13893 szInstr));
13894
13895 if (LogIs3Enabled())
13896 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13897 }
13898 else
13899# endif
13900 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13901 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13902 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13903}
13904#endif /* LOG_ENABLED */
13905
13906
13907/**
13908 * Makes status code adjustments (pass up from I/O and access handlers)
13909 * as well as maintaining statistics.
13910 *
13911 * @returns Strict VBox status code to pass up.
13912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13913 * @param rcStrict The status from executing an instruction.
13914 */
13915DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
13916{
13917 if (rcStrict != VINF_SUCCESS)
13918 {
13919 if (RT_SUCCESS(rcStrict))
13920 {
13921 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13922 || rcStrict == VINF_IOM_R3_IOPORT_READ
13923 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13924 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13925 || rcStrict == VINF_IOM_R3_MMIO_READ
13926 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13927 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13928 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13929 || rcStrict == VINF_CPUM_R3_MSR_READ
13930 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13931 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13932 || rcStrict == VINF_EM_RAW_TO_R3
13933 || rcStrict == VINF_EM_TRIPLE_FAULT
13934 || rcStrict == VINF_GIM_R3_HYPERCALL
13935 /* raw-mode / virt handlers only: */
13936 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13937 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13938 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13939 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13940 || rcStrict == VINF_SELM_SYNC_GDT
13941 || rcStrict == VINF_CSAM_PENDING_ACTION
13942 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13943 /* nested hw.virt codes: */
13944 || rcStrict == VINF_VMX_VMEXIT
13945 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13946 || rcStrict == VINF_SVM_VMEXIT
13947 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13948/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13949 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13950#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13951 if ( ( rcStrict == VINF_VMX_VMEXIT
13952 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
13953 && rcPassUp == VINF_SUCCESS)
13954 rcStrict = VINF_SUCCESS;
13955 else
13956#endif
13957#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13958 if ( rcStrict == VINF_SVM_VMEXIT
13959 && rcPassUp == VINF_SUCCESS)
13960 rcStrict = VINF_SUCCESS;
13961 else
13962#endif
13963 if (rcPassUp == VINF_SUCCESS)
13964 pVCpu->iem.s.cRetInfStatuses++;
13965 else if ( rcPassUp < VINF_EM_FIRST
13966 || rcPassUp > VINF_EM_LAST
13967 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13968 {
13969 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13970 pVCpu->iem.s.cRetPassUpStatus++;
13971 rcStrict = rcPassUp;
13972 }
13973 else
13974 {
13975 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13976 pVCpu->iem.s.cRetInfStatuses++;
13977 }
13978 }
13979 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13980 pVCpu->iem.s.cRetAspectNotImplemented++;
13981 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13982 pVCpu->iem.s.cRetInstrNotImplemented++;
13983 else
13984 pVCpu->iem.s.cRetErrStatuses++;
13985 }
13986 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13987 {
13988 pVCpu->iem.s.cRetPassUpStatus++;
13989 rcStrict = pVCpu->iem.s.rcPassUp;
13990 }
13991
13992 return rcStrict;
13993}
13994
13995
13996/**
13997 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13998 * IEMExecOneWithPrefetchedByPC.
13999 *
14000 * Similar code is found in IEMExecLots.
14001 *
14002 * @return Strict VBox status code.
14003 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14004 * @param fExecuteInhibit If set, execute the instruction following CLI,
14005 * POP SS and MOV SS,GR.
14006 * @param pszFunction The calling function name.
14007 */
14008DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPU pVCpu, bool fExecuteInhibit, const char *pszFunction)
14009{
14010 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14011 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14012 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14013 RT_NOREF_PV(pszFunction);
14014
14015#ifdef IEM_WITH_SETJMP
14016 VBOXSTRICTRC rcStrict;
14017 jmp_buf JmpBuf;
14018 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14019 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14020 if ((rcStrict = setjmp(JmpBuf)) == 0)
14021 {
14022 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14023 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14024 }
14025 else
14026 pVCpu->iem.s.cLongJumps++;
14027 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14028#else
14029 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14030 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14031#endif
14032 if (rcStrict == VINF_SUCCESS)
14033 pVCpu->iem.s.cInstructions++;
14034 if (pVCpu->iem.s.cActiveMappings > 0)
14035 {
14036 Assert(rcStrict != VINF_SUCCESS);
14037 iemMemRollback(pVCpu);
14038 }
14039 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14040 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14041 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14042
14043//#ifdef DEBUG
14044// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14045//#endif
14046
14047 /* Execute the next instruction as well if a cli, pop ss or
14048 mov ss, Gr has just completed successfully. */
14049 if ( fExecuteInhibit
14050 && rcStrict == VINF_SUCCESS
14051 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14052 && EMGetInhibitInterruptsPC(pVCpu) == pVCpu->cpum.GstCtx.rip )
14053 {
14054 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers);
14055 if (rcStrict == VINF_SUCCESS)
14056 {
14057#ifdef LOG_ENABLED
14058 iemLogCurInstr(pVCpu, false, pszFunction);
14059#endif
14060#ifdef IEM_WITH_SETJMP
14061 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14062 if ((rcStrict = setjmp(JmpBuf)) == 0)
14063 {
14064 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14065 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14066 }
14067 else
14068 pVCpu->iem.s.cLongJumps++;
14069 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14070#else
14071 IEM_OPCODE_GET_NEXT_U8(&b);
14072 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14073#endif
14074 if (rcStrict == VINF_SUCCESS)
14075 pVCpu->iem.s.cInstructions++;
14076 if (pVCpu->iem.s.cActiveMappings > 0)
14077 {
14078 Assert(rcStrict != VINF_SUCCESS);
14079 iemMemRollback(pVCpu);
14080 }
14081 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14082 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14083 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14084 }
14085 else if (pVCpu->iem.s.cActiveMappings > 0)
14086 iemMemRollback(pVCpu);
14087 EMSetInhibitInterruptsPC(pVCpu, UINT64_C(0x7777555533331111));
14088 }
14089
14090 /*
14091 * Return value fiddling, statistics and sanity assertions.
14092 */
14093 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14094
14095 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14096 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14097 return rcStrict;
14098}
14099
14100
14101#ifdef IN_RC
14102/**
14103 * Re-enters raw-mode or ensures we return to ring-3.
14104 *
14105 * @returns rcStrict, maybe modified.
14106 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14107 * @param rcStrict The status code returned by the interpreter.
14108 */
14109DECLINLINE(VBOXSTRICTRC) iemRCRawMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14110{
14111 if ( !pVCpu->iem.s.fInPatchCode
14112 && ( rcStrict == VINF_SUCCESS
14113 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED /* pgmPoolAccessPfHandlerFlush */
14114 || rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED /* ditto */ ) )
14115 {
14116 if (pVCpu->cpum.GstCtx.eflags.Bits.u1IF || rcStrict != VINF_SUCCESS)
14117 CPUMRawEnter(pVCpu);
14118 else
14119 {
14120 Log(("iemRCRawMaybeReenter: VINF_EM_RESCHEDULE\n"));
14121 rcStrict = VINF_EM_RESCHEDULE;
14122 }
14123 }
14124 return rcStrict;
14125}
14126#endif
14127
14128
14129/**
14130 * Execute one instruction.
14131 *
14132 * @return Strict VBox status code.
14133 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14134 */
14135VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu)
14136{
14137#ifdef LOG_ENABLED
14138 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14139#endif
14140
14141 /*
14142 * Do the decoding and emulation.
14143 */
14144 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14145 if (rcStrict == VINF_SUCCESS)
14146 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14147 else if (pVCpu->iem.s.cActiveMappings > 0)
14148 iemMemRollback(pVCpu);
14149
14150#ifdef IN_RC
14151 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14152#endif
14153 if (rcStrict != VINF_SUCCESS)
14154 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14155 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14156 return rcStrict;
14157}
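/*
 * Minimal caller sketch (illustrative only; EM is the real caller and does a
 * lot more around this).  Assumes a valid pVCpu and a hypothetical budget of
 * 16 instructions:
 *
 *      for (unsigned i = 0; i < 16; i++)
 *      {
 *          VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;    // pass informational and error statuses back up
 *      }
 */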
14158
14159
14160VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14161{
14162 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14163
14164 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14165 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14166 if (rcStrict == VINF_SUCCESS)
14167 {
14168 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14169 if (pcbWritten)
14170 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14171 }
14172 else if (pVCpu->iem.s.cActiveMappings > 0)
14173 iemMemRollback(pVCpu);
14174
14175#ifdef IN_RC
14176 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14177#endif
14178 return rcStrict;
14179}
14180
14181
14182VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14183 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14184{
14185 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14186
14187 VBOXSTRICTRC rcStrict;
14188 if ( cbOpcodeBytes
14189 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14190 {
14191 iemInitDecoder(pVCpu, false);
14192#ifdef IEM_WITH_CODE_TLB
14193 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14194 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14195 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14196 pVCpu->iem.s.offCurInstrStart = 0;
14197 pVCpu->iem.s.offInstrNextByte = 0;
14198#else
14199 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14200 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14201#endif
14202 rcStrict = VINF_SUCCESS;
14203 }
14204 else
14205 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14206 if (rcStrict == VINF_SUCCESS)
14207 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14208 else if (pVCpu->iem.s.cActiveMappings > 0)
14209 iemMemRollback(pVCpu);
14210
14211#ifdef IN_RC
14212 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14213#endif
14214 return rcStrict;
14215}
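/*
 * Usage sketch (illustrative only): executing a single instruction from
 * opcode bytes the caller has already fetched.  The abInstr/cbInstr buffer
 * below is hypothetical; the RIP check inside the function decides whether
 * the prefetched bytes are actually used or a fresh prefetch is done.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu, CPUMCTX2CORE(IEM_GET_CTX(pVCpu)),
 *                                                           pVCpu->cpum.GstCtx.rip, &abInstr[0], cbInstr);
 */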
14216
14217
14218VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14219{
14220 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14221
14222 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14223 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14224 if (rcStrict == VINF_SUCCESS)
14225 {
14226 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14227 if (pcbWritten)
14228 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14229 }
14230 else if (pVCpu->iem.s.cActiveMappings > 0)
14231 iemMemRollback(pVCpu);
14232
14233#ifdef IN_RC
14234 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14235#endif
14236 return rcStrict;
14237}
14238
14239
14240VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14241 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14242{
14243 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14244
14245 VBOXSTRICTRC rcStrict;
14246 if ( cbOpcodeBytes
14247 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14248 {
14249 iemInitDecoder(pVCpu, true);
14250#ifdef IEM_WITH_CODE_TLB
14251 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14252 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14253 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14254 pVCpu->iem.s.offCurInstrStart = 0;
14255 pVCpu->iem.s.offInstrNextByte = 0;
14256#else
14257 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14258 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14259#endif
14260 rcStrict = VINF_SUCCESS;
14261 }
14262 else
14263 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14264 if (rcStrict == VINF_SUCCESS)
14265 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14266 else if (pVCpu->iem.s.cActiveMappings > 0)
14267 iemMemRollback(pVCpu);
14268
14269#ifdef IN_RC
14270 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14271#endif
14272 return rcStrict;
14273}
14274
14275
14276/**
14277 * For debugging DISGetParamSize, may come in handy.
14278 *
14279 * @returns Strict VBox status code.
14280 * @param pVCpu The cross context virtual CPU structure of the
14281 * calling EMT.
14282 * @param pCtxCore The context core structure.
14283 * @param OpcodeBytesPC The PC of the opcode bytes.
14284 * @param pvOpcodeBytes Prefetched opcode bytes.
14285 * @param cbOpcodeBytes Number of prefetched bytes.
14286 * @param pcbWritten Where to return the number of bytes written.
14287 * Optional.
14288 */
14289VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14290 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14291 uint32_t *pcbWritten)
14292{
14293 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14294
14295 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14296 VBOXSTRICTRC rcStrict;
14297 if ( cbOpcodeBytes
14298 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14299 {
14300 iemInitDecoder(pVCpu, true);
14301#ifdef IEM_WITH_CODE_TLB
14302 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14303 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14304 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14305 pVCpu->iem.s.offCurInstrStart = 0;
14306 pVCpu->iem.s.offInstrNextByte = 0;
14307#else
14308 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14309 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14310#endif
14311 rcStrict = VINF_SUCCESS;
14312 }
14313 else
14314 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true);
14315 if (rcStrict == VINF_SUCCESS)
14316 {
14317 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14318 if (pcbWritten)
14319 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14320 }
14321 else if (pVCpu->iem.s.cActiveMappings > 0)
14322 iemMemRollback(pVCpu);
14323
14324#ifdef IN_RC
14325 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14326#endif
14327 return rcStrict;
14328}
14329
14330
14331VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPU pVCpu, uint32_t *pcInstructions)
14332{
14333 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14334
14335 /*
14336 * See if there is an interrupt pending in TRPM, inject it if we can.
14337 */
14338 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14339#if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
14340 bool fIntrEnabled = pVCpu->cpum.GstCtx.hwvirt.fGif;
14341 if (fIntrEnabled)
14342 {
14343 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
14344 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, IEM_GET_CTX(pVCpu));
14345 else
14346 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14347 }
14348#else
14349 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14350#endif
14351 if ( fIntrEnabled
14352 && TRPMHasTrap(pVCpu)
14353 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14354 {
14355 uint8_t u8TrapNo;
14356 TRPMEVENT enmType;
14357 RTGCUINT uErrCode;
14358 RTGCPTR uCr2;
14359 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
14360 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14361 TRPMResetTrap(pVCpu);
14362 }
14363
14364 /*
14365 * Initial decoder init w/ prefetch, then setup setjmp.
14366 */
14367 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14368 if (rcStrict == VINF_SUCCESS)
14369 {
14370#ifdef IEM_WITH_SETJMP
14371 jmp_buf JmpBuf;
14372 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14373 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14374 pVCpu->iem.s.cActiveMappings = 0;
14375 if ((rcStrict = setjmp(JmpBuf)) == 0)
14376#endif
14377 {
14378 /*
14379 * The run loop. We limit ourselves to 4096 instructions right now.
14380 */
14381 PVM pVM = pVCpu->CTX_SUFF(pVM);
14382 uint32_t cInstr = 4096;
14383 for (;;)
14384 {
14385 /*
14386 * Log the state.
14387 */
14388#ifdef LOG_ENABLED
14389 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14390#endif
14391
14392 /*
14393 * Do the decoding and emulation.
14394 */
14395 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14396 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14397 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14398 {
14399 Assert(pVCpu->iem.s.cActiveMappings == 0);
14400 pVCpu->iem.s.cInstructions++;
14401 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14402 {
14403 uint64_t fCpu = pVCpu->fLocalForcedActions
14404 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14405 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14406 | VMCPU_FF_TLB_FLUSH
14407#ifdef VBOX_WITH_RAW_MODE
14408 | VMCPU_FF_TRPM_SYNC_IDT
14409 | VMCPU_FF_SELM_SYNC_TSS
14410 | VMCPU_FF_SELM_SYNC_GDT
14411 | VMCPU_FF_SELM_SYNC_LDT
14412#endif
14413 | VMCPU_FF_INHIBIT_INTERRUPTS
14414 | VMCPU_FF_BLOCK_NMIS
14415 | VMCPU_FF_UNHALT ));
14416
14417 if (RT_LIKELY( ( !fCpu
14418 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14419 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14420 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14421 {
14422 if (cInstr-- > 0)
14423 {
14424 Assert(pVCpu->iem.s.cActiveMappings == 0);
14425 iemReInitDecoder(pVCpu);
14426 continue;
14427 }
14428 }
14429 }
14430 Assert(pVCpu->iem.s.cActiveMappings == 0);
14431 }
14432 else if (pVCpu->iem.s.cActiveMappings > 0)
14433 iemMemRollback(pVCpu);
14434 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14435 break;
14436 }
14437 }
14438#ifdef IEM_WITH_SETJMP
14439 else
14440 {
14441 if (pVCpu->iem.s.cActiveMappings > 0)
14442 iemMemRollback(pVCpu);
14443 pVCpu->iem.s.cLongJumps++;
14444 }
14445 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14446#endif
14447
14448 /*
14449 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14450 */
14451 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14452 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14453 }
14454 else
14455 {
14456 if (pVCpu->iem.s.cActiveMappings > 0)
14457 iemMemRollback(pVCpu);
14458
14459#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14460 /*
14461 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14462 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14463 */
14464 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14465#endif
14466 }
14467
14468 /*
14469 * Maybe re-enter raw-mode and log.
14470 */
14471#ifdef IN_RC
14472 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14473#endif
14474 if (rcStrict != VINF_SUCCESS)
14475 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14476 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14477 if (pcInstructions)
14478 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14479 return rcStrict;
14480}
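/*
 * Usage sketch (illustrative only; EM drives this in practice):
 *
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
 *      LogFlow(("executed %u instruction(s) -> %Rrc\n", cInstructions, VBOXSTRICTRC_VAL(rcStrict)));
 */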
14481
14482
14483/**
14484 * Interface used by EMExecuteExec, does exit statistics and limits.
14485 *
14486 * @returns Strict VBox status code.
14487 * @param pVCpu The cross context virtual CPU structure.
14488 * @param fWillExit To be defined.
14489 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14490 * @param cMaxInstructions Maximum number of instructions to execute.
14491 * @param cMaxInstructionsWithoutExits
14492 * The max number of instructions without exits.
14493 * @param pStats Where to return statistics.
14494 */
14495VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPU pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14496 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14497{
14498 NOREF(fWillExit); /** @todo define flexible exit crits */
14499
14500 /*
14501 * Initialize return stats.
14502 */
14503 pStats->cInstructions = 0;
14504 pStats->cExits = 0;
14505 pStats->cMaxExitDistance = 0;
14506 pStats->cReserved = 0;
14507
14508 /*
14509 * Initial decoder init w/ prefetch, then setup setjmp.
14510 */
14511 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false);
14512 if (rcStrict == VINF_SUCCESS)
14513 {
14514#ifdef IEM_WITH_SETJMP
14515 jmp_buf JmpBuf;
14516 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14517 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14518 pVCpu->iem.s.cActiveMappings = 0;
14519 if ((rcStrict = setjmp(JmpBuf)) == 0)
14520#endif
14521 {
14522#ifdef IN_RING0
14523 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14524#endif
14525 uint32_t cInstructionSinceLastExit = 0;
14526
14527 /*
14528 * The run loop. We limit ourselves to 4096 instructions right now.
14529 */
14530 PVM pVM = pVCpu->CTX_SUFF(pVM);
14531 for (;;)
14532 {
14533 /*
14534 * Log the state.
14535 */
14536#ifdef LOG_ENABLED
14537 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14538#endif
14539
14540 /*
14541 * Do the decoding and emulation.
14542 */
14543 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14544
14545 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14546 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14547
14548 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14549 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14550 {
14551 pStats->cExits += 1;
14552 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14553 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14554 cInstructionSinceLastExit = 0;
14555 }
14556
14557 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14558 {
14559 Assert(pVCpu->iem.s.cActiveMappings == 0);
14560 pVCpu->iem.s.cInstructions++;
14561 pStats->cInstructions++;
14562 cInstructionSinceLastExit++;
14563 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14564 {
14565 uint64_t fCpu = pVCpu->fLocalForcedActions
14566 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14567 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14568 | VMCPU_FF_TLB_FLUSH
14569#ifdef VBOX_WITH_RAW_MODE
14570 | VMCPU_FF_TRPM_SYNC_IDT
14571 | VMCPU_FF_SELM_SYNC_TSS
14572 | VMCPU_FF_SELM_SYNC_GDT
14573 | VMCPU_FF_SELM_SYNC_LDT
14574#endif
14575 | VMCPU_FF_INHIBIT_INTERRUPTS
14576 | VMCPU_FF_BLOCK_NMIS
14577 | VMCPU_FF_UNHALT ));
14578
14579 if (RT_LIKELY( ( ( !fCpu
14580 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14581 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14582 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14583 || pStats->cInstructions < cMinInstructions))
14584 {
14585 if (pStats->cInstructions < cMaxInstructions)
14586 {
14587 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14588 {
14589#ifdef IN_RING0
14590 if ( !fCheckPreemptionPending
14591 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14592#endif
14593 {
14594 Assert(pVCpu->iem.s.cActiveMappings == 0);
14595 iemReInitDecoder(pVCpu);
14596 continue;
14597 }
14598#ifdef IN_RING0
14599 rcStrict = VINF_EM_RAW_INTERRUPT;
14600 break;
14601#endif
14602 }
14603 }
14604 }
14605 Assert(!(fCpu & VMCPU_FF_IEM));
14606 }
14607 Assert(pVCpu->iem.s.cActiveMappings == 0);
14608 }
14609 else if (pVCpu->iem.s.cActiveMappings > 0)
14610 iemMemRollback(pVCpu);
14611 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14612 break;
14613 }
14614 }
14615#ifdef IEM_WITH_SETJMP
14616 else
14617 {
14618 if (pVCpu->iem.s.cActiveMappings > 0)
14619 iemMemRollback(pVCpu);
14620 pVCpu->iem.s.cLongJumps++;
14621 }
14622 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14623#endif
14624
14625 /*
14626 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14627 */
14628 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14629 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14630 }
14631 else
14632 {
14633 if (pVCpu->iem.s.cActiveMappings > 0)
14634 iemMemRollback(pVCpu);
14635
14636#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14637 /*
14638 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14639 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14640 */
14641 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14642#endif
14643 }
14644
14645 /*
14646 * Maybe re-enter raw-mode and log.
14647 */
14648#ifdef IN_RC
14649 rcStrict = iemRCRawMaybeReenter(pVCpu, rcStrict);
14650#endif
14651 if (rcStrict != VINF_SUCCESS)
14652 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14653 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14654 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14655 return rcStrict;
14656}
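/*
 * Usage sketch (illustrative only; the limits below are hypothetical and the
 * IEMEXECFOREXITSTATS name is inferred from the PIEMEXECFOREXITSTATS pointer
 * type used above):
 *
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0,      // fWillExit (to be defined)
 *                                              1,             // cMinInstructions
 *                                              4096,          // cMaxInstructions
 *                                              512,           // cMaxInstructionsWithoutExits
 *                                              &Stats);
 *      Log(("%u instructions, %u exits, max exit distance %u -> %Rrc\n",
 *           Stats.cInstructions, Stats.cExits, Stats.cMaxExitDistance, VBOXSTRICTRC_VAL(rcStrict)));
 */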
14657
14658
14659/**
14660 * Injects a trap, fault, abort, software interrupt or external interrupt.
14661 *
14662 * The parameter list matches TRPMQueryTrapAll pretty closely.
14663 *
14664 * @returns Strict VBox status code.
14665 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14666 * @param u8TrapNo The trap number.
14667 * @param enmType What type is it (trap/fault/abort), software
14668 * interrupt or hardware interrupt.
14669 * @param uErrCode The error code if applicable.
14670 * @param uCr2 The CR2 value if applicable.
14671 * @param cbInstr The instruction length (only relevant for
14672 * software interrupts).
14673 */
14674VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPU pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14675 uint8_t cbInstr)
14676{
14677 iemInitDecoder(pVCpu, false);
14678#ifdef DBGFTRACE_ENABLED
14679 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14680 u8TrapNo, enmType, uErrCode, uCr2);
14681#endif
14682
14683 uint32_t fFlags;
14684 switch (enmType)
14685 {
14686 case TRPM_HARDWARE_INT:
14687 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14688 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14689 uErrCode = uCr2 = 0;
14690 break;
14691
14692 case TRPM_SOFTWARE_INT:
14693 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14694 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14695 uErrCode = uCr2 = 0;
14696 break;
14697
14698 case TRPM_TRAP:
14699 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14700 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14701 if (u8TrapNo == X86_XCPT_PF)
14702 fFlags |= IEM_XCPT_FLAGS_CR2;
14703 switch (u8TrapNo)
14704 {
14705 case X86_XCPT_DF:
14706 case X86_XCPT_TS:
14707 case X86_XCPT_NP:
14708 case X86_XCPT_SS:
14709 case X86_XCPT_PF:
14710 case X86_XCPT_AC:
14711 fFlags |= IEM_XCPT_FLAGS_ERR;
14712 break;
14713
14714 case X86_XCPT_NMI:
14715 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
14716 break;
14717 }
14718 break;
14719
14720 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14721 }
14722
14723 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14724
14725 if (pVCpu->iem.s.cActiveMappings > 0)
14726 iemMemRollback(pVCpu);
14727
14728 return rcStrict;
14729}
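

/*
 * Illustrative sketch (not part of the original source): injecting a guest
 * page fault through the API above.  The helper name and values are made up
 * for the example; only IEMInjectTrap itself comes from this file.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleInjectGuestPageFault(PVMCPU pVCpu, RTGCPTR GCPtrFault, uint16_t uErrCd)
{
    /* TRPM_TRAP + X86_XCPT_PF makes IEMInjectTrap set IEM_XCPT_FLAGS_ERR and
       IEM_XCPT_FLAGS_CR2, so both the error code and the fault address are
       delivered to the guest. */
    return IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCd, GCPtrFault, 0 /*cbInstr*/);
}
#endif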
14730
14731
14732/**
14733 * Injects the active TRPM event.
14734 *
14735 * @returns Strict VBox status code.
14736 * @param pVCpu The cross context virtual CPU structure.
14737 */
14738VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPU pVCpu)
14739{
14740#ifndef IEM_IMPLEMENTS_TASKSWITCH
14741 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14742#else
14743 uint8_t u8TrapNo;
14744 TRPMEVENT enmType;
14745 RTGCUINT uErrCode;
14746 RTGCUINTPTR uCr2;
14747 uint8_t cbInstr;
14748 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr);
14749 if (RT_FAILURE(rc))
14750 return rc;
14751
14752 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14753# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14754 if (rcStrict == VINF_SVM_VMEXIT)
14755 rcStrict = VINF_SUCCESS;
14756# endif
14757
14758 /** @todo Are there any other codes that imply the event was successfully
14759 * delivered to the guest? See @bugref{6607}. */
14760 if ( rcStrict == VINF_SUCCESS
14761 || rcStrict == VINF_IEM_RAISED_XCPT)
14762 TRPMResetTrap(pVCpu);
14763
14764 return rcStrict;
14765#endif
14766}
14767
14768
14769VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14770{
14771 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14772 return VERR_NOT_IMPLEMENTED;
14773}
14774
14775
14776VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14777{
14778 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14779 return VERR_NOT_IMPLEMENTED;
14780}
14781
14782
14783#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14784/**
14785 * Executes an IRET instruction with the default operand size.
14786 *
14787 * This is for PATM.
14788 *
14789 * @returns VBox status code.
14790 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14791 * @param pCtxCore The register frame.
14792 */
14793VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPU pVCpu, PCPUMCTXCORE pCtxCore)
14794{
14795 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14796
14797 iemCtxCoreToCtx(pCtx, pCtxCore);
14798 iemInitDecoder(pVCpu);
14799 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14800 if (rcStrict == VINF_SUCCESS)
14801 iemCtxToCtxCore(pCtxCore, pCtx);
14802 else
14803 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14804 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14805 return rcStrict;
14806}
14807#endif
14808
14809
14810/**
14811 * Macro used by the IEMExec* methods to check the given instruction length.
14812 *
14813 * Will return on failure!
14814 *
14815 * @param a_cbInstr The given instruction length.
14816 * @param a_cbMin The minimum length.
14817 */
14818#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14819 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14820 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14821
14822
14823/**
14824 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14825 *
14826 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14827 *
14828 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14829 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14830 * @param rcStrict The status code to fiddle.
14831 */
14832DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
14833{
14834 iemUninitExec(pVCpu);
14835#ifdef IN_RC
14836 return iemRCRawMaybeReenter(pVCpu, iemExecStatusCodeFiddling(pVCpu, rcStrict));
14837#else
14838 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14839#endif
14840}
14841
14842
14843/**
14844 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14845 *
14846 * This API ASSUMES that the caller has already verified that the guest code is
14847 * allowed to access the I/O port. (The I/O port is in the DX register in the
14848 * guest state.)
14849 *
14850 * @returns Strict VBox status code.
14851 * @param pVCpu The cross context virtual CPU structure.
14852 * @param cbValue The size of the I/O port access (1, 2, or 4).
14853 * @param enmAddrMode The addressing mode.
14854 * @param fRepPrefix Indicates whether a repeat prefix is used
14855 * (doesn't matter which for this instruction).
14856 * @param cbInstr The instruction length in bytes.
14857 * @param iEffSeg The effective segment register index.
14858 * @param fIoChecked Whether the access to the I/O port has been
14859 * checked or not. It's typically checked in the
14860 * HM scenario.
14861 */
14862VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14863 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14864{
14865 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14866 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14867
14868 /*
14869 * State init.
14870 */
14871 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14872
14873 /*
14874 * Switch orgy for getting to the right handler.
14875 */
14876 VBOXSTRICTRC rcStrict;
14877 if (fRepPrefix)
14878 {
14879 switch (enmAddrMode)
14880 {
14881 case IEMMODE_16BIT:
14882 switch (cbValue)
14883 {
14884 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14885 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14886 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14887 default:
14888 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14889 }
14890 break;
14891
14892 case IEMMODE_32BIT:
14893 switch (cbValue)
14894 {
14895 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14896 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14897 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14898 default:
14899 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14900 }
14901 break;
14902
14903 case IEMMODE_64BIT:
14904 switch (cbValue)
14905 {
14906 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14907 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14908 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14909 default:
14910 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14911 }
14912 break;
14913
14914 default:
14915 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14916 }
14917 }
14918 else
14919 {
14920 switch (enmAddrMode)
14921 {
14922 case IEMMODE_16BIT:
14923 switch (cbValue)
14924 {
14925 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14926 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14927 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14928 default:
14929 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14930 }
14931 break;
14932
14933 case IEMMODE_32BIT:
14934 switch (cbValue)
14935 {
14936 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14937 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14938 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14939 default:
14940 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14941 }
14942 break;
14943
14944 case IEMMODE_64BIT:
14945 switch (cbValue)
14946 {
14947 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14948 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14949 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14950 default:
14951 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14952 }
14953 break;
14954
14955 default:
14956 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14957 }
14958 }
14959
14960 if (pVCpu->iem.s.cActiveMappings)
14961 iemMemRollback(pVCpu);
14962
14963 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14964}
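

/*
 * Illustrative sketch (not part of the original source): how an exit handler
 * might hand a decoded "rep outsb" to the API above.  The helper name and the
 * fixed parameters are assumptions for the example only.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateRepOutsb(PVMCPU pVCpu, uint8_t cbInstr)
{
    /* Byte-sized accesses, 32-bit addressing, REP prefix, default DS segment,
       and the I/O permission already verified by the caller (fIoChecked). */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif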
14965
14966
14967/**
14968 * Interface for HM and EM for executing string I/O IN (read) instructions.
14969 *
14970 * This API ASSUMES that the caller has already verified that the guest code is
14971 * allowed to access the I/O port. (The I/O port is in the DX register in the
14972 * guest state.)
14973 *
14974 * @returns Strict VBox status code.
14975 * @param pVCpu The cross context virtual CPU structure.
14976 * @param cbValue The size of the I/O port access (1, 2, or 4).
14977 * @param enmAddrMode The addressing mode.
14978 * @param fRepPrefix Indicates whether a repeat prefix is used
14979 * (doesn't matter which for this instruction).
14980 * @param cbInstr The instruction length in bytes.
14981 * @param fIoChecked Whether the access to the I/O port has been
14982 * checked or not. It's typically checked in the
14983 * HM scenario.
14984 */
14985VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPU pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14986 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14987{
14988 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14989
14990 /*
14991 * State init.
14992 */
14993 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14994
14995 /*
14996 * Switch orgy for getting to the right handler.
14997 */
14998 VBOXSTRICTRC rcStrict;
14999 if (fRepPrefix)
15000 {
15001 switch (enmAddrMode)
15002 {
15003 case IEMMODE_16BIT:
15004 switch (cbValue)
15005 {
15006 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15007 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15008 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15009 default:
15010 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15011 }
15012 break;
15013
15014 case IEMMODE_32BIT:
15015 switch (cbValue)
15016 {
15017 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15018 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15019 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15020 default:
15021 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15022 }
15023 break;
15024
15025 case IEMMODE_64BIT:
15026 switch (cbValue)
15027 {
15028 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15029 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15030 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15031 default:
15032 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15033 }
15034 break;
15035
15036 default:
15037 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15038 }
15039 }
15040 else
15041 {
15042 switch (enmAddrMode)
15043 {
15044 case IEMMODE_16BIT:
15045 switch (cbValue)
15046 {
15047 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15048 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15049 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15050 default:
15051 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15052 }
15053 break;
15054
15055 case IEMMODE_32BIT:
15056 switch (cbValue)
15057 {
15058 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15059 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15060 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15061 default:
15062 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15063 }
15064 break;
15065
15066 case IEMMODE_64BIT:
15067 switch (cbValue)
15068 {
15069 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15070 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15071 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15072 default:
15073 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15074 }
15075 break;
15076
15077 default:
15078 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15079 }
15080 }
15081
15082 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15083 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15084}
15085
15086
15087/**
15088 * Interface for raw-mode to execute a decoded OUT (port write) instruction.
15089 *
15090 * @returns Strict VBox status code.
15091 * @param pVCpu The cross context virtual CPU structure.
15092 * @param cbInstr The instruction length in bytes.
15093 * @param u16Port The port to write to.
15094 * @param fImm Whether the port is specified using an immediate operand or
15095 * using the implicit DX register.
15096 * @param cbReg The register size.
15097 *
15098 * @remarks In ring-0 not all of the state needs to be synced in.
15099 */
15100VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15101{
15102 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15103 Assert(cbReg <= 4 && cbReg != 3);
15104
15105 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15106 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15107 Assert(!pVCpu->iem.s.cActiveMappings);
15108 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15109}
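

/*
 * Illustrative sketch (not part of the original source): emulating a decoded
 * "out dx, al" (opcode 0xEE, 1 byte, port taken from DX, byte-sized access).
 * The helper name is hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateOutDxAl(PVMCPU pVCpu, uint16_t u16Port)
{
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm: port is in DX*/, 1 /*cbReg*/);
}
#endif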
15110
15111
15112/**
15113 * Interface for raw-mode to execute a decoded IN (port read) instruction.
15114 *
15115 * @returns Strict VBox status code.
15116 * @param pVCpu The cross context virtual CPU structure.
15117 * @param cbInstr The instruction length in bytes.
15118 * @param u16Port The port to read.
15119 * @param fImm Whether the port is specified using an immediate operand or
15120 * using the implicit DX.
15121 * @param cbReg The register size.
15122 */
15123VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPU pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15124{
15125 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15126 Assert(cbReg <= 4 && cbReg != 3);
15127
15128 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15129 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15130 Assert(!pVCpu->iem.s.cActiveMappings);
15131 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15132}
15133
15134
15135/**
15136 * Interface for HM and EM to write to a CRx register.
15137 *
15138 * @returns Strict VBox status code.
15139 * @param pVCpu The cross context virtual CPU structure.
15140 * @param cbInstr The instruction length in bytes.
15141 * @param iCrReg The control register number (destination).
15142 * @param iGReg The general purpose register number (source).
15143 *
15144 * @remarks In ring-0 not all of the state needs to be synced in.
15145 */
15146VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15147{
15148 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15149 Assert(iCrReg < 16);
15150 Assert(iGReg < 16);
15151
15152 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15153 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15154 Assert(!pVCpu->iem.s.cActiveMappings);
15155 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15156}
15157
15158
15159/**
15160 * Interface for HM and EM to read from a CRx register.
15161 *
15162 * @returns Strict VBox status code.
15163 * @param pVCpu The cross context virtual CPU structure.
15164 * @param cbInstr The instruction length in bytes.
15165 * @param iGReg The general purpose register number (destination).
15166 * @param iCrReg The control register number (source).
15167 *
15168 * @remarks In ring-0 not all of the state needs to be synced in.
15169 */
15170VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPU pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15171{
15172 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15173 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15174 | CPUMCTX_EXTRN_APIC_TPR);
15175 Assert(iCrReg < 16);
15176 Assert(iGReg < 16);
15177
15178 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15179 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15180 Assert(!pVCpu->iem.s.cActiveMappings);
15181 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15182}
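

/*
 * Illustrative sketch (not part of the original source): handling an
 * intercepted "mov cr3, rax" (0F 22 D8, 3 bytes) with the write interface
 * above.  The helper name is hypothetical; the indexes use the regular x86
 * register numbering.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateMovCr3FromRax(PVMCPU pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg: CR3*/, X86_GREG_xAX /*iGReg*/);
}
#endif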
15183
15184
15185/**
15186 * Interface for HM and EM to clear the CR0[TS] bit.
15187 *
15188 * @returns Strict VBox status code.
15189 * @param pVCpu The cross context virtual CPU structure.
15190 * @param cbInstr The instruction length in bytes.
15191 *
15192 * @remarks In ring-0 not all of the state needs to be synced in.
15193 */
15194VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPU pVCpu, uint8_t cbInstr)
15195{
15196 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15197
15198 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15199 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15200 Assert(!pVCpu->iem.s.cActiveMappings);
15201 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15202}
15203
15204
15205/**
15206 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15207 *
15208 * @returns Strict VBox status code.
15209 * @param pVCpu The cross context virtual CPU structure.
15210 * @param cbInstr The instruction length in bytes.
15211 * @param uValue The value to load into CR0.
15212 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15213 * memory operand. Otherwise pass NIL_RTGCPTR.
15214 *
15215 * @remarks In ring-0 not all of the state needs to be synced in.
15216 */
15217VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPU pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15218{
15219 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15220
15221 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15222 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15223 Assert(!pVCpu->iem.s.cActiveMappings);
15224 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15225}
15226
15227
15228/**
15229 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15230 *
15231 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15232 *
15233 * @returns Strict VBox status code.
15234 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15235 * @param cbInstr The instruction length in bytes.
15236 * @remarks In ring-0 not all of the state needs to be synced in.
15237 * @thread EMT(pVCpu)
15238 */
15239VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPU pVCpu, uint8_t cbInstr)
15240{
15241 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15242
15243 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15244 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15245 Assert(!pVCpu->iem.s.cActiveMappings);
15246 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15247}
15248
15249
15250/**
15251 * Interface for HM and EM to emulate the WBINVD instruction.
15252 *
15253 * @returns Strict VBox status code.
15254 * @param pVCpu The cross context virtual CPU structure.
15255 * @param cbInstr The instruction length in bytes.
15256 *
15257 * @remarks In ring-0 not all of the state needs to be synced in.
15258 */
15259VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPU pVCpu, uint8_t cbInstr)
15260{
15261 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15262
15263 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15264 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15265 Assert(!pVCpu->iem.s.cActiveMappings);
15266 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15267}
15268
15269
15270/**
15271 * Interface for HM and EM to emulate the INVD instruction.
15272 *
15273 * @returns Strict VBox status code.
15274 * @param pVCpu The cross context virtual CPU structure.
15275 * @param cbInstr The instruction length in bytes.
15276 *
15277 * @remarks In ring-0 not all of the state needs to be synced in.
15278 */
15279VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPU pVCpu, uint8_t cbInstr)
15280{
15281 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15282
15283 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15284 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15285 Assert(!pVCpu->iem.s.cActiveMappings);
15286 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15287}
15288
15289
15290/**
15291 * Interface for HM and EM to emulate the INVLPG instruction.
15292 *
15293 * @returns Strict VBox status code.
15294 * @retval VINF_PGM_SYNC_CR3
15295 *
15296 * @param pVCpu The cross context virtual CPU structure.
15297 * @param cbInstr The instruction length in bytes.
15298 * @param GCPtrPage The effective address of the page to invalidate.
15299 *
15300 * @remarks In ring-0 not all of the state needs to be synced in.
15301 */
15302VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPU pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15303{
15304 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15305
15306 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15307 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15308 Assert(!pVCpu->iem.s.cActiveMappings);
15309 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15310}
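

/*
 * Illustrative sketch (not part of the original source): handling an
 * intercepted "invlpg [mem]" (0F 01 /7, 3 bytes for a simple ModR/M byte).
 * The helper name is hypothetical; GCPtrPage would come from the exit
 * information.  Note that VINF_PGM_SYNC_CR3 must be passed on to the caller.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateInvlpg(PVMCPU pVCpu, RTGCPTR GCPtrPage)
{
    return IEMExecDecodedInvlpg(pVCpu, 3 /*cbInstr*/, GCPtrPage);
}
#endif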
15311
15312
15313/**
15314 * Interface for HM and EM to emulate the CPUID instruction.
15315 *
15316 * @returns Strict VBox status code.
15317 *
15318 * @param pVCpu The cross context virtual CPU structure.
15319 * @param cbInstr The instruction length in bytes.
15320 *
15321 * @remarks Not all of the state needs to be synced in, the usual plus RAX and RCX.
15322 */
15323VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPU pVCpu, uint8_t cbInstr)
15324{
15325 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15326 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15327
15328 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15329 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15330 Assert(!pVCpu->iem.s.cActiveMappings);
15331 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15332}
15333
15334
15335/**
15336 * Interface for HM and EM to emulate the RDPMC instruction.
15337 *
15338 * @returns Strict VBox status code.
15339 *
15340 * @param pVCpu The cross context virtual CPU structure.
15341 * @param cbInstr The instruction length in bytes.
15342 *
15343 * @remarks Not all of the state needs to be synced in.
15344 */
15345VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPU pVCpu, uint8_t cbInstr)
15346{
15347 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15348 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15349
15350 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15351 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15352 Assert(!pVCpu->iem.s.cActiveMappings);
15353 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15354}
15355
15356
15357/**
15358 * Interface for HM and EM to emulate the RDTSC instruction.
15359 *
15360 * @returns Strict VBox status code.
15361 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15362 *
15363 * @param pVCpu The cross context virtual CPU structure.
15364 * @param cbInstr The instruction length in bytes.
15365 *
15366 * @remarks Not all of the state needs to be synced in.
15367 */
15368VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPU pVCpu, uint8_t cbInstr)
15369{
15370 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15371 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15372
15373 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15374 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15375 Assert(!pVCpu->iem.s.cActiveMappings);
15376 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15377}
15378
15379
15380/**
15381 * Interface for HM and EM to emulate the RDTSCP instruction.
15382 *
15383 * @returns Strict VBox status code.
15384 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15385 *
15386 * @param pVCpu The cross context virtual CPU structure.
15387 * @param cbInstr The instruction length in bytes.
15388 *
15389 * @remarks Not all of the state needs to be synced in. It is recommended
15390 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15391 */
15392VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPU pVCpu, uint8_t cbInstr)
15393{
15394 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15395 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15396
15397 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15398 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15399 Assert(!pVCpu->iem.s.cActiveMappings);
15400 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15401}
15402
15403
15404/**
15405 * Interface for HM and EM to emulate the RDMSR instruction.
15406 *
15407 * @returns Strict VBox status code.
15408 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15409 *
15410 * @param pVCpu The cross context virtual CPU structure.
15411 * @param cbInstr The instruction length in bytes.
15412 *
15413 * @remarks Not all of the state needs to be synced in. Requires RCX and
15414 * (currently) all MSRs.
15415 */
15416VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPU pVCpu, uint8_t cbInstr)
15417{
15418 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15419 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15420
15421 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15422 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15423 Assert(!pVCpu->iem.s.cActiveMappings);
15424 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15425}
15426
15427
15428/**
15429 * Interface for HM and EM to emulate the WRMSR instruction.
15430 *
15431 * @returns Strict VBox status code.
15432 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15433 *
15434 * @param pVCpu The cross context virtual CPU structure.
15435 * @param cbInstr The instruction length in bytes.
15436 *
15437 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15438 * and (currently) all MSRs.
15439 */
15440VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPU pVCpu, uint8_t cbInstr)
15441{
15442 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15443 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15444 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15445
15446 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15447 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15448 Assert(!pVCpu->iem.s.cActiveMappings);
15449 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15450}
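

/*
 * Illustrative sketch (not part of the original source): a WRMSR intercept
 * handler.  The caller must already have RCX, RAX, RDX and the MSR state
 * imported into the guest context (the IEM_CTX_ASSERT above enforces this);
 * WRMSR (0F 30) is always 2 bytes.  The helper name is hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleEmulateWrmsr(PVMCPU pVCpu)
{
    return IEMExecDecodedWrmsr(pVCpu, 2 /*cbInstr*/);
}
#endif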
15451
15452
15453/**
15454 * Interface for HM and EM to emulate the MONITOR instruction.
15455 *
15456 * @returns Strict VBox status code.
15457 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15458 *
15459 * @param pVCpu The cross context virtual CPU structure.
15460 * @param cbInstr The instruction length in bytes.
15461 *
15462 * @remarks Not all of the state needs to be synced in.
15463 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15464 * are used.
15465 */
15466VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPU pVCpu, uint8_t cbInstr)
15467{
15468 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15469 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15470
15471 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15472 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15473 Assert(!pVCpu->iem.s.cActiveMappings);
15474 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15475}
15476
15477
15478/**
15479 * Interface for HM and EM to emulate the MWAIT instruction.
15480 *
15481 * @returns Strict VBox status code.
15482 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15483 *
15484 * @param pVCpu The cross context virtual CPU structure.
15485 * @param cbInstr The instruction length in bytes.
15486 *
15487 * @remarks Not all of the state needs to be synced in.
15488 */
15489VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPU pVCpu, uint8_t cbInstr)
15490{
15491 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15492
15493 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15494 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15495 Assert(!pVCpu->iem.s.cActiveMappings);
15496 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15497}
15498
15499
15500/**
15501 * Interface for HM and EM to emulate the HLT instruction.
15502 *
15503 * @returns Strict VBox status code.
15504 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15505 *
15506 * @param pVCpu The cross context virtual CPU structure.
15507 * @param cbInstr The instruction length in bytes.
15508 *
15509 * @remarks Not all of the state needs to be synced in.
15510 */
15511VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPU pVCpu, uint8_t cbInstr)
15512{
15513 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15514
15515 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15516 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15517 Assert(!pVCpu->iem.s.cActiveMappings);
15518 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15519}
15520
15521
15522/**
15523 * Checks if IEM is in the process of delivering an event (interrupt or
15524 * exception).
15525 *
15526 * @returns true if we're in the process of raising an interrupt or exception,
15527 * false otherwise.
15528 * @param pVCpu The cross context virtual CPU structure.
15529 * @param puVector Where to store the vector associated with the
15530 * currently delivered event, optional.
15531 * @param pfFlags Where to store the event delivery flags (see
15532 * IEM_XCPT_FLAGS_XXX), optional.
15533 * @param puErr Where to store the error code associated with the
15534 * event, optional.
15535 * @param puCr2 Where to store the CR2 associated with the event,
15536 * optional.
15537 * @remarks The caller should check the flags to determine if the error code and
15538 * CR2 are valid for the event.
15539 */
15540VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPU pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15541{
15542 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15543 if (fRaisingXcpt)
15544 {
15545 if (puVector)
15546 *puVector = pVCpu->iem.s.uCurXcpt;
15547 if (pfFlags)
15548 *pfFlags = pVCpu->iem.s.fCurXcpt;
15549 if (puErr)
15550 *puErr = pVCpu->iem.s.uCurXcptErr;
15551 if (puCr2)
15552 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15553 }
15554 return fRaisingXcpt;
15555}
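

/*
 * Illustrative sketch (not part of the original source): querying whether IEM
 * is in the middle of delivering an event, e.g. to decide how a nested fault
 * should be reported.  The helper name is hypothetical.
 */
#if 0 /* example only */
static void exampleLogCurrentXcpt(PVMCPU pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("Delivering vector %#x, flags %#x, err %#x, cr2 %#RX64\n", uVector, fFlags, uErr, uCr2));
}
#endif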
15556
15557#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15558
15559/**
15560 * Interface for HM and EM to emulate the CLGI instruction.
15561 *
15562 * @returns Strict VBox status code.
15563 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15564 * @param cbInstr The instruction length in bytes.
15565 * @thread EMT(pVCpu)
15566 */
15567VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPU pVCpu, uint8_t cbInstr)
15568{
15569 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15570
15571 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15572 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15573 Assert(!pVCpu->iem.s.cActiveMappings);
15574 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15575}
15576
15577
15578/**
15579 * Interface for HM and EM to emulate the STGI instruction.
15580 *
15581 * @returns Strict VBox status code.
15582 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15583 * @param cbInstr The instruction length in bytes.
15584 * @thread EMT(pVCpu)
15585 */
15586VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPU pVCpu, uint8_t cbInstr)
15587{
15588 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15589
15590 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15591 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15592 Assert(!pVCpu->iem.s.cActiveMappings);
15593 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15594}
15595
15596
15597/**
15598 * Interface for HM and EM to emulate the VMLOAD instruction.
15599 *
15600 * @returns Strict VBox status code.
15601 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15602 * @param cbInstr The instruction length in bytes.
15603 * @thread EMT(pVCpu)
15604 */
15605VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPU pVCpu, uint8_t cbInstr)
15606{
15607 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15608
15609 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15610 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15611 Assert(!pVCpu->iem.s.cActiveMappings);
15612 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15613}
15614
15615
15616/**
15617 * Interface for HM and EM to emulate the VMSAVE instruction.
15618 *
15619 * @returns Strict VBox status code.
15620 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15621 * @param cbInstr The instruction length in bytes.
15622 * @thread EMT(pVCpu)
15623 */
15624VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPU pVCpu, uint8_t cbInstr)
15625{
15626 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15627
15628 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15629 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15630 Assert(!pVCpu->iem.s.cActiveMappings);
15631 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15632}
15633
15634
15635/**
15636 * Interface for HM and EM to emulate the INVLPGA instruction.
15637 *
15638 * @returns Strict VBox status code.
15639 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15640 * @param cbInstr The instruction length in bytes.
15641 * @thread EMT(pVCpu)
15642 */
15643VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPU pVCpu, uint8_t cbInstr)
15644{
15645 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15646
15647 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15648 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15649 Assert(!pVCpu->iem.s.cActiveMappings);
15650 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15651}
15652
15653
15654/**
15655 * Interface for HM and EM to emulate the VMRUN instruction.
15656 *
15657 * @returns Strict VBox status code.
15658 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15659 * @param cbInstr The instruction length in bytes.
15660 * @thread EMT(pVCpu)
15661 */
15662VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPU pVCpu, uint8_t cbInstr)
15663{
15664 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15665 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15666
15667 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15668 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15669 Assert(!pVCpu->iem.s.cActiveMappings);
15670 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15671}
15672
15673
15674/**
15675 * Interface for HM and EM to emulate \#VMEXIT.
15676 *
15677 * @returns Strict VBox status code.
15678 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15679 * @param uExitCode The exit code.
15680 * @param uExitInfo1 The exit info. 1 field.
15681 * @param uExitInfo2 The exit info. 2 field.
15682 * @thread EMT(pVCpu)
15683 */
15684VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPU pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15685{
15686 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15687 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15688 if (pVCpu->iem.s.cActiveMappings)
15689 iemMemRollback(pVCpu);
15690 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15691}
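

/*
 * Illustrative sketch (not part of the original source): reflecting an
 * intercepted CPUID to the nested-guest hypervisor via the API above.
 * SVM_EXIT_CPUID carries no exit info, hence the two zero parameters; the
 * helper name is hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleReflectCpuidToNestedGuest(PVMCPU pVCpu)
{
    return IEMExecSvmVmexit(pVCpu, SVM_EXIT_CPUID, 0 /*uExitInfo1*/, 0 /*uExitInfo2*/);
}
#endif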
15692
15693#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15694
15695#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15696
15697/**
15698 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15699 *
15700 * @returns Strict VBox status code.
15701 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15702 * @thread EMT(pVCpu)
15703 */
15704VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPU pVCpu)
15705{
15706 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15707 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15708 if (pVCpu->iem.s.cActiveMappings)
15709 iemMemRollback(pVCpu);
15710 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15711}
15712
15713
15714/**
15715 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15716 *
15717 * @returns Strict VBox status code.
15718 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15719 * @param uVector The external interrupt vector.
15720 * @param fIntPending Whether the external interrupt is pending or
15721 * acknowledged in the interrupt controller.
15722 * @thread EMT(pVCpu)
15723 */
15724VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPU pVCpu, uint8_t uVector, bool fIntPending)
15725{
15726 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15727 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15728 if (pVCpu->iem.s.cActiveMappings)
15729 iemMemRollback(pVCpu);
15730 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15731}
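

/*
 * Illustrative sketch (not part of the original source): signalling a VM-exit
 * for an external interrupt that is still pending in the interrupt controller
 * (fIntPending=true).  The vector and helper name are made up for the example.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleVmexitOnPendingExtInt(PVMCPU pVCpu)
{
    return IEMExecVmxVmexitExtInt(pVCpu, 0x30 /*uVector*/, true /*fIntPending*/);
}
#endif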
15732
15733
15734/**
15735 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15736 *
15737 * @returns Strict VBox status code.
15738 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15739 * @param uVector The SIPI vector.
15740 * @thread EMT(pVCpu)
15741 */
15742VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPU pVCpu, uint8_t uVector)
15743{
15744 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15745 VBOXSTRICTRC rcStrict = iemVmxVmexitStartupIpi(pVCpu, uVector);
15746 if (pVCpu->iem.s.cActiveMappings)
15747 iemMemRollback(pVCpu);
15748 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15749}
15750
15751
15752/**
15753 * Interface for HM and EM to emulate VM-exit due to init-IPI (INIT).
15754 *
15755 * @returns Strict VBox status code.
15756 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15757 * @thread EMT(pVCpu)
15758 */
15759VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInitIpi(PVMCPU pVCpu)
15760{
15761 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15762 VBOXSTRICTRC rcStrict = iemVmxVmexitInitIpi(pVCpu);
15763 if (pVCpu->iem.s.cActiveMappings)
15764 iemMemRollback(pVCpu);
15765 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15766}
15767
15768
15769/**
15770 * Interface for HM and EM to emulate VM-exits for interrupt-windows.
15771 *
15772 * @returns Strict VBox status code.
15773 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15777 * @thread EMT(pVCpu)
15778 */
15779VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitIntWindow(PVMCPU pVCpu)
15780{
15781 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
15782 VBOXSTRICTRC rcStrict = iemVmxVmexitIntWindow(pVCpu);
15783 if (pVCpu->iem.s.cActiveMappings)
15784 iemMemRollback(pVCpu);
15785 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15786}
15787
15788
15789/**
15790 * Interface for HM and EM to emulate the VMREAD instruction.
15791 *
15792 * @returns Strict VBox status code.
15793 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15794 * @param pExitInfo Pointer to the VM-exit information struct.
15795 * @thread EMT(pVCpu)
15796 */
15797VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15798{
15799 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15800 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15801 Assert(pExitInfo);
15802
15803 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15804
15805 VBOXSTRICTRC rcStrict;
15806 uint8_t const cbInstr = pExitInfo->cbInstr;
15807 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15808 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15809 {
15810 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
15811 {
15812 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15813 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, uFieldEnc, pExitInfo);
15814 }
15815 else
15816 {
15817 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15818 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, uFieldEnc, pExitInfo);
15819 }
15820 }
15821 else
15822 {
15823 RTGCPTR GCPtrDst = pExitInfo->GCPtrEffAddr;
15824 uint8_t iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15825 IEMMODE enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15826 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, GCPtrDst, uFieldEnc, pExitInfo);
15827 }
15828 if (pVCpu->iem.s.cActiveMappings)
15829 iemMemRollback(pVCpu);
15830 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15831}
15832
15833
15834/**
15835 * Interface for HM and EM to emulate the VMWRITE instruction.
15836 *
15837 * @returns Strict VBox status code.
15838 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15839 * @param pExitInfo Pointer to the VM-exit information struct.
15840 * @thread EMT(pVCpu)
15841 */
15842VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15843{
15844 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15845 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15846 Assert(pExitInfo);
15847
15848 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15849
15850 uint64_t u64Val;
15851 uint8_t iEffSeg;
15852 IEMMODE enmEffAddrMode;
15853 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
15854 {
15855 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
15856 iEffSeg = UINT8_MAX;
15857 enmEffAddrMode = UINT8_MAX;
15858 }
15859 else
15860 {
15861 u64Val = pExitInfo->GCPtrEffAddr;
15862 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
15863 enmEffAddrMode = (IEMMODE)pExitInfo->InstrInfo.VmreadVmwrite.u3AddrSize;
15864 }
15865 uint8_t const cbInstr = pExitInfo->cbInstr;
15866 uint32_t const uFieldEnc = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
15867 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, enmEffAddrMode, u64Val, uFieldEnc, pExitInfo);
15868 if (pVCpu->iem.s.cActiveMappings)
15869 iemMemRollback(pVCpu);
15870 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15871}
15872
15873
15874/**
15875 * Interface for HM and EM to emulate the VMPTRLD instruction.
15876 *
15877 * @returns Strict VBox status code.
15878 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15879 * @param pExitInfo Pointer to the VM-exit information struct.
15880 * @thread EMT(pVCpu)
15881 */
15882VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15883{
15884 Assert(pExitInfo);
15885 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15886 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15887
15888 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15889
15890 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15891 uint8_t const cbInstr = pExitInfo->cbInstr;
15892 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15893 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15894 if (pVCpu->iem.s.cActiveMappings)
15895 iemMemRollback(pVCpu);
15896 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15897}
15898
15899
15900/**
15901 * Interface for HM and EM to emulate the VMPTRST instruction.
15902 *
15903 * @returns Strict VBox status code.
15904 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15905 * @param pExitInfo Pointer to the VM-exit information struct.
15906 * @thread EMT(pVCpu)
15907 */
15908VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15909{
15910 Assert(pExitInfo);
15911 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15912 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15913
15914 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15915
15916 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15917 uint8_t const cbInstr = pExitInfo->cbInstr;
15918 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15919 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15920 if (pVCpu->iem.s.cActiveMappings)
15921 iemMemRollback(pVCpu);
15922 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15923}
15924
15925
15926/**
15927 * Interface for HM and EM to emulate the VMCLEAR instruction.
15928 *
15929 * @returns Strict VBox status code.
15930 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15931 * @param pExitInfo Pointer to the VM-exit information struct.
15932 * @thread EMT(pVCpu)
15933 */
15934VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15935{
15936 Assert(pExitInfo);
15937 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15938 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15939
15940 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15941
15942 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15943 uint8_t const cbInstr = pExitInfo->cbInstr;
15944 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
15945 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
15946 if (pVCpu->iem.s.cActiveMappings)
15947 iemMemRollback(pVCpu);
15948 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15949}
15950
15951
15952/**
15953 * Interface for HM and EM to emulate the VMXON instruction.
15954 *
15955 * @returns Strict VBox status code.
15956 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15957 * @param pExitInfo Pointer to the VM-exit information struct.
15958 * @thread EMT(pVCpu)
15959 */
15960VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPU pVCpu, PCVMXVEXITINFO pExitInfo)
15961{
15962 Assert(pExitInfo);
15963 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
15964 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
15965
15966 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15967
15968 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
15969 uint8_t const cbInstr = pExitInfo->cbInstr;
15970 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
15971 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
15972 if (pVCpu->iem.s.cActiveMappings)
15973 iemMemRollback(pVCpu);
15974 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15975}
15976
15977
15978/**
15979 * Interface for HM and EM to emulate the VMXOFF instruction.
15980 *
15981 * @returns Strict VBox status code.
15982 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15983 * @param cbInstr The instruction length in bytes.
15984 * @thread EMT(pVCpu)
15985 */
15986VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPU pVCpu, uint8_t cbInstr)
15987{
15988 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15989 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_HM_VMX_MASK);
15990
15991 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15992 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
15993 Assert(!pVCpu->iem.s.cActiveMappings);
15994 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15995}
15996
15997#endif
15998
15999#ifdef IN_RING3
16000
16001/**
16002 * Handles the unlikely and probably fatal merge cases.
16003 *
16004 * @returns Merged status code.
16005 * @param rcStrict Current EM status code.
16006 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16007 * with @a rcStrict.
16008 * @param iMemMap The memory mapping index. For error reporting only.
16009 * @param pVCpu The cross context virtual CPU structure of the calling
16010 * thread, for error reporting only.
16011 */
16012DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16013 unsigned iMemMap, PVMCPU pVCpu)
16014{
16015 if (RT_FAILURE_NP(rcStrict))
16016 return rcStrict;
16017
16018 if (RT_FAILURE_NP(rcStrictCommit))
16019 return rcStrictCommit;
16020
16021 if (rcStrict == rcStrictCommit)
16022 return rcStrictCommit;
16023
16024 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16025 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16026 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16027 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16028 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16029 return VERR_IOM_FF_STATUS_IPE;
16030}
16031
16032
16033/**
16034 * Helper for IOMR3ProcessForceFlag.
16035 *
16036 * @returns Merged status code.
16037 * @param rcStrict Current EM status code.
16038 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16039 * with @a rcStrict.
16040 * @param iMemMap The memory mapping index. For error reporting only.
16041 * @param pVCpu The cross context virtual CPU structure of the calling
16042 * thread, for error reporting only.
16043 */
16044DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPU pVCpu)
16045{
16046 /* Simple. */
16047 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16048 return rcStrictCommit;
16049
16050 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16051 return rcStrict;
16052
16053 /* EM scheduling status codes. */
16054 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16055 && rcStrict <= VINF_EM_LAST))
16056 {
16057 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16058 && rcStrictCommit <= VINF_EM_LAST))
16059 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16060 }
16061
16062 /* Unlikely */
16063 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16064}
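

/*
 * Illustrative sketch (not part of the original source) of the merge rules
 * above: the commit status wins over VINF_SUCCESS/VINF_EM_RAW_TO_R3, an EM
 * status is kept over a successful commit, and of two EM scheduling codes the
 * lower (higher priority) one survives.  The helper name is hypothetical.
 */
#if 0 /* example only */
static void exampleMergeStatusRules(PVMCPU pVCpu)
{
    Assert(iemR3MergeStatus(VINF_SUCCESS, VINF_EM_RAW_TO_R3, 0 /*iMemMap*/, pVCpu) == VINF_EM_RAW_TO_R3);
    Assert(iemR3MergeStatus(VINF_EM_HALT, VINF_SUCCESS,      0 /*iMemMap*/, pVCpu) == VINF_EM_HALT);
}
#endif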
16065
16066
16067/**
16068 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16069 *
16070 * @returns Merge between @a rcStrict and what the commit operation returned.
16071 * @param pVM The cross context VM structure.
16072 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16073 * @param rcStrict The status code returned by ring-0 or raw-mode.
16074 */
16075VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
16076{
16077 /*
16078 * Reset the pending commit.
16079 */
16080 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16081 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16082 ("%#x %#x %#x\n",
16083 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16084 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16085
16086 /*
16087 * Commit the pending bounce buffers (usually just one).
16088 */
16089 unsigned cBufs = 0;
16090 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16091 while (iMemMap-- > 0)
16092 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16093 {
16094 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16095 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16096 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16097
16098 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16099 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16100 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16101
16102 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16103 {
16104 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16105 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16106 pbBuf,
16107 cbFirst,
16108 PGMACCESSORIGIN_IEM);
16109 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16110 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16111 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16112 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16113 }
16114
16115 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16116 {
16117 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16118 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16119 pbBuf + cbFirst,
16120 cbSecond,
16121 PGMACCESSORIGIN_IEM);
16122 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16123 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16124 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16125 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16126 }
16127 cBufs++;
16128 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16129 }
16130
16131 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16132 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16133 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16134 pVCpu->iem.s.cActiveMappings = 0;
16135 return rcStrict;
16136}
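

/*
 * Illustrative sketch (not part of the original source): a ring-3 caller
 * committing pending IEM bounce-buffer writes once the force flag has been
 * set, e.g. on the way out of the ring-0 execution loop.  The helper name is
 * hypothetical.
 */
#if 0 /* example only */
static VBOXSTRICTRC exampleHandleIemForceFlag(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rcStrict)
{
    if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
        rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
    return rcStrict;
}
#endif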
16137
16138#endif /* IN_RING3 */
16139