VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@91580

Last change on this file since 91580 was 91580, checked in by vboxsync, 3 years ago

VMM: Nested VMX: bugref:10092 Made changes to PGM++ to handle invalid PAE PDPEs being loaded.

1/* $Id: IEMAll.cpp 91580 2021-10-06 07:22:04Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#include "IEMInternal.h"
111#include <VBox/vmm/vmcc.h>
112#include <VBox/log.h>
113#include <VBox/err.h>
114#include <VBox/param.h>
115#include <VBox/dis.h>
116#include <VBox/disopcode.h>
117#include <iprt/asm-math.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123/*********************************************************************************************************************************
124* Structures and Typedefs *
125*********************************************************************************************************************************/
126/** @typedef PFNIEMOP
127 * Pointer to an opcode decoder function.
128 */
129
130/** @def FNIEMOP_DEF
131 * Define an opcode decoder function.
132 *
133 * We're using macros for this so that adding and removing parameters as well as
134 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL
135 *
136 * @param a_Name The function name.
137 */
138
139/** @typedef PFNIEMOPRM
140 * Pointer to an opcode decoder function with RM byte.
141 */
142
143/** @def FNIEMOPRM_DEF
144 * Define an opcode decoder function with RM byte.
145 *
146 * We're using macros for this so that adding and removing parameters as well as
147 * tweaking compiler specific attributes becomes easier. See FNIEMOP_CALL_1
148 *
149 * @param a_Name The function name.
150 */
151
152#if defined(__GNUC__) && defined(RT_ARCH_X86)
153typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
154typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
164typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
171
172#elif defined(__GNUC__)
173typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
174typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
181
182#else
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
191
192#endif
193#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
194
195
196/**
197 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
198 */
199typedef union IEMSELDESC
200{
201 /** The legacy view. */
202 X86DESC Legacy;
203 /** The long mode view. */
204 X86DESC64 Long;
205} IEMSELDESC;
206/** Pointer to a selector descriptor table entry. */
207typedef IEMSELDESC *PIEMSELDESC;
208
209/**
210 * CPU exception classes.
211 */
212typedef enum IEMXCPTCLASS
213{
214 IEMXCPTCLASS_BENIGN,
215 IEMXCPTCLASS_CONTRIBUTORY,
216 IEMXCPTCLASS_PAGE_FAULT,
217 IEMXCPTCLASS_DOUBLE_FAULT
218} IEMXCPTCLASS;
219
220
221/*********************************************************************************************************************************
222* Defined Constants And Macros *
223*********************************************************************************************************************************/
224/** @def IEM_WITH_SETJMP
225 * Enables alternative status code handling using setjmps.
226 *
227 * This adds a bit of expense via the setjmp() call since it saves all the
228 * non-volatile registers. However, it eliminates return code checks and allows
229 * for more optimal return value passing (return regs instead of stack buffer).
230 */
231#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
232# define IEM_WITH_SETJMP
233#endif
234
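/*
 * Rough sketch of the pattern IEM_WITH_SETJMP enables (simplified and purely
 * illustrative; the names below are hypothetical, not the real IEM plumbing):
 * the top-level executor arms a jump buffer once, and raise helpers longjmp
 * back with the status code instead of threading it through every call frame.
 */
#if 0
# include <setjmp.h>

static jmp_buf g_ExampleJmpBuf; /* per-VCpu in a real implementation */

static void exampleRaiseXcpt(int rc)
{
    longjmp(g_ExampleJmpBuf, rc); /* rc is non-zero; unwinds straight back to the executor */
}

static int exampleExecuteOne(void)
{
    int rc = setjmp(g_ExampleJmpBuf);
    if (rc == 0)
    {
        /* ... decode and execute; helpers call exampleRaiseXcpt() on faults ... */
        rc = 0; /* VINF_SUCCESS */
    }
    return rc; /* either success or the status passed to longjmp */
}
#endif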
235/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
236 * due to GCC lacking knowledge about the value range of a switch. */
237#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
238
239/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
241
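/*
 * Usage sketch (illustration only; iemExampleModeToBits is a hypothetical
 * helper): the macro supplies the whole 'default:' label, so it is dropped
 * straight into a switch over what should be an exhaustive enum.
 */
#if 0
static int iemExampleModeToBits(IEMMODE enmMode)
{
    switch (enmMode)
    {
        case IEMMODE_16BIT: return 16;
        case IEMMODE_32BIT: return 32;
        case IEMMODE_64BIT: return 64;
        IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* asserts and returns VERR_IPE_NOT_REACHED_DEFAULT_CASE */
    }
}
#endif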
242/**
243 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
244 * occasion.
245 */
246#ifdef LOG_ENABLED
247# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
248 do { \
249 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
250 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
251 } while (0)
252#else
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
255#endif
256
257/**
258 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
259 * occasion using the supplied logger statement.
260 *
261 * @param a_LoggerArgs What to log on failure.
262 */
263#ifdef LOG_ENABLED
264# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
265 do { \
266 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
267 /*LogFunc(a_LoggerArgs);*/ \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
269 } while (0)
270#else
271# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
272 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
273#endif
274
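/*
 * Usage sketch (illustration only; iemOp_example_unimplemented is a
 * hypothetical stub): bailing out of an instruction aspect that is not
 * implemented yet, with a formatted log entry in logging-enabled builds.
 */
#if 0
FNIEMOP_DEF(iemOp_example_unimplemented)
{
    if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
        IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("example: 64-bit operand handling not done yet\n"));
    /* ... 16/32-bit emulation would go here ... */
    return VINF_SUCCESS;
}
#endif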
275/**
276 * Call an opcode decoder function.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF.
280 */
281#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
282
283/**
284 * Call a common opcode decoder function taking one extra argument.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_1.
288 */
289#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
290
291/**
292 * Call a common opcode decoder function taking two extra arguments.
293 *
294 * We're using macros for this so that adding and removing parameters can be
295 * done as we please. See FNIEMOP_DEF_2.
296 */
297#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
298
299/**
300 * Check if we're currently executing in real or virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The IEM state of the current CPU.
304 */
305#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in virtual 8086 mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in long mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Check if we're currently executing in a 64-bit code segment.
325 *
326 * @returns @c true if it is, @c false if not.
327 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
328 */
329#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
330
331/**
332 * Check if we're currently executing in real mode.
333 *
334 * @returns @c true if it is, @c false if not.
335 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
336 */
337#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
338
339/**
340 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
341 * @returns PCCPUMFEATURES
342 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
343 */
344#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
345
346/**
347 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
348 * @returns PCCPUMFEATURES
349 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
350 */
351#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
352
353/**
354 * Evaluates to true if we're presenting an Intel CPU to the guest.
355 */
356#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
357
358/**
359 * Evaluates to true if we're presenting an AMD CPU to the guest.
360 */
361#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
362
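/*
 * Usage sketch (illustration only; iemExampleCanUseNRipSave is hypothetical):
 * emulation behaviour is gated on the features and vendor presented to the
 * guest, e.g. only trusting SVM NRIP save when the guest CPU profile exposes it.
 */
#if 0
static bool iemExampleCanUseNRipSave(PVMCPUCC pVCpu)
{
    return IEM_IS_GUEST_CPU_AMD(pVCpu)
        && IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSvmNextRipSave;
}
#endif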
363/**
364 * Check if the address is canonical.
365 */
366#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
367
368/**
369 * Gets the effective VEX.VVVV value.
370 *
371 * The 4th bit is ignored when not in 64-bit code.
372 * @returns effective V-register value.
373 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
374 */
375#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
376 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
377
378/** @def IEM_USE_UNALIGNED_DATA_ACCESS
379 * Use unaligned accesses instead of elaborate byte assembly. */
380#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
381# define IEM_USE_UNALIGNED_DATA_ACCESS
382#endif
383
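/*
 * Illustrative sketch (hypothetical helper) of the two strategies the define
 * selects between when, say, assembling a 32-bit value from opcode bytes.
 */
#if 0
static uint32_t iemExampleFetchU32(uint8_t const *pb)
{
# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
    return *(uint32_t const *)pb;                           /* x86/AMD64 handle unaligned loads fine */
# else
    return RT_MAKE_U32_FROM_U8(pb[0], pb[1], pb[2], pb[3]); /* explicit little-endian byte assembly */
# endif
}
#endif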
384#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
385
386/**
387 * Check if the guest has entered VMX root operation.
388 */
389# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
390
391/**
392 * Check if the guest has entered VMX non-root operation.
393 */
394# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if the nested-guest has the given Pin-based VM-execution control set.
398 */
399# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
400 (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
401
402/**
403 * Check if the nested-guest has the given Processor-based VM-execution control set.
404 */
405#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
406 (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
407
408/**
409 * Check if the nested-guest has the given Secondary Processor-based VM-execution
410 * control set.
411 */
412#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
413 (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
414
415/**
416 * Invokes the VMX VM-exit handler for an instruction intercept.
417 */
418# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
419 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept where the
423 * instruction provides additional VM-exit information.
424 */
425# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
426 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
427
428/**
429 * Invokes the VMX VM-exit handler for a task switch.
430 */
431# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
432 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for MWAIT.
436 */
437# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
438 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for a triple fault.
442 */
443# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
444 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
445
446#else
447# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
448# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
449# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
450# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
451# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
452# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
453# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
454# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
455# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
456# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
457
458#endif
459
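/*
 * Usage sketch (illustration only; iemOp_example_rdtsc_wrapper is hypothetical
 * and simplified, and the RDTSC control/exit-reason constants come from the
 * VMX headers, not this file): an instruction handler checks the relevant
 * nested-guest execution control and triggers the VM-exit, which also returns
 * to the caller.
 */
#if 0
FNIEMOP_DEF(iemOp_example_rdtsc_wrapper)
{
    if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
        && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
        IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, IEM_GET_INSTR_LEN(pVCpu));
    /* ... otherwise emulate RDTSC normally ... */
    return VINF_SUCCESS;
}
#endif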
460#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
461/**
462 * Check if an SVM control/instruction intercept is set.
463 */
464# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
465 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
466
467/**
468 * Check if an SVM read CRx intercept is set.
469 */
470# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
471 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
472
473/**
474 * Check if an SVM write CRx intercept is set.
475 */
476# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM read DRx intercept is set.
481 */
482# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
483 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
484
485/**
486 * Check if an SVM write DRx intercept is set.
487 */
488# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM exception intercept is set.
493 */
494# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
495 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
496
497/**
498 * Invokes the SVM \#VMEXIT handler for the nested-guest.
499 */
500# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
501 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
502
503/**
504 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
505 * corresponding decode assist information.
506 */
507# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
508 do \
509 { \
510 uint64_t uExitInfo1; \
511 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
512 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
513 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
514 else \
515 uExitInfo1 = 0; \
516 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
517 } while (0)
518
519/** Checks and handles an SVM nested-guest instruction intercept, updating
520 * the NRIP if needed.
521 */
522# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
523 do \
524 { \
525 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
526 { \
527 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
528 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
529 } \
530 } while (0)
531
532/** Checks and handles SVM nested-guest CR0 read intercept. */
533# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
534 do \
535 { \
536 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
537 { /* probably likely */ } \
538 else \
539 { \
540 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
541 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
542 } \
543 } while (0)
544
545/**
546 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
547 */
548# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
549 do { \
550 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
551 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
552 } while (0)
553
554#else
555# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
556# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
557# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
558# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
559# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
560# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
561# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
562# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
563# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
564# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
565# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
566
567#endif
568
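/*
 * Usage sketch (illustration only; iemOp_example_cpuid_wrapper is hypothetical
 * and simplified, and the CPUID intercept/exit-code constants come from
 * hm_svm.h, not this file): the helper macro checks the SVM control intercept,
 * updates the NRIP when supported and performs the #VMEXIT, returning to the
 * caller.
 */
#if 0
FNIEMOP_DEF(iemOp_example_cpuid_wrapper)
{
    IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_CPUID, SVM_EXIT_CPUID,
                                  0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
    /* ... otherwise emulate CPUID normally ... */
    return VINF_SUCCESS;
}
#endif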
569
570/*********************************************************************************************************************************
571* Global Variables *
572*********************************************************************************************************************************/
573extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
574
575
576/** Function table for the ADD instruction. */
577IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
578{
579 iemAImpl_add_u8, iemAImpl_add_u8_locked,
580 iemAImpl_add_u16, iemAImpl_add_u16_locked,
581 iemAImpl_add_u32, iemAImpl_add_u32_locked,
582 iemAImpl_add_u64, iemAImpl_add_u64_locked
583};
584
585/** Function table for the ADC instruction. */
586IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
587{
588 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
589 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
590 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
591 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
592};
593
594/** Function table for the SUB instruction. */
595IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
596{
597 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
598 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
599 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
600 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
601};
602
603/** Function table for the SBB instruction. */
604IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
605{
606 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
607 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
608 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
609 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
610};
611
612/** Function table for the OR instruction. */
613IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
614{
615 iemAImpl_or_u8, iemAImpl_or_u8_locked,
616 iemAImpl_or_u16, iemAImpl_or_u16_locked,
617 iemAImpl_or_u32, iemAImpl_or_u32_locked,
618 iemAImpl_or_u64, iemAImpl_or_u64_locked
619};
620
621/** Function table for the XOR instruction. */
622IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
623{
624 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
625 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
626 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
627 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
628};
629
630/** Function table for the AND instruction. */
631IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
632{
633 iemAImpl_and_u8, iemAImpl_and_u8_locked,
634 iemAImpl_and_u16, iemAImpl_and_u16_locked,
635 iemAImpl_and_u32, iemAImpl_and_u32_locked,
636 iemAImpl_and_u64, iemAImpl_and_u64_locked
637};
638
639/** Function table for the CMP instruction.
640 * @remarks Making operand order ASSUMPTIONS.
641 */
642IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
643{
644 iemAImpl_cmp_u8, NULL,
645 iemAImpl_cmp_u16, NULL,
646 iemAImpl_cmp_u32, NULL,
647 iemAImpl_cmp_u64, NULL
648};
649
650/** Function table for the TEST instruction.
651 * @remarks Making operand order ASSUMPTIONS.
652 */
653IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
654{
655 iemAImpl_test_u8, NULL,
656 iemAImpl_test_u16, NULL,
657 iemAImpl_test_u32, NULL,
658 iemAImpl_test_u64, NULL
659};
660
661/** Function table for the BT instruction. */
662IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
663{
664 NULL, NULL,
665 iemAImpl_bt_u16, NULL,
666 iemAImpl_bt_u32, NULL,
667 iemAImpl_bt_u64, NULL
668};
669
670/** Function table for the BTC instruction. */
671IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
672{
673 NULL, NULL,
674 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
675 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
676 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
677};
678
679/** Function table for the BTR instruction. */
680IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
681{
682 NULL, NULL,
683 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
684 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
685 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
686};
687
688/** Function table for the BTS instruction. */
689IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
690{
691 NULL, NULL,
692 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
693 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
694 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
695};
696
697/** Function table for the BSF instruction. */
698IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
699{
700 NULL, NULL,
701 iemAImpl_bsf_u16, NULL,
702 iemAImpl_bsf_u32, NULL,
703 iemAImpl_bsf_u64, NULL
704};
705
706/** Function table for the BSR instruction. */
707IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
708{
709 NULL, NULL,
710 iemAImpl_bsr_u16, NULL,
711 iemAImpl_bsr_u32, NULL,
712 iemAImpl_bsr_u64, NULL
713};
714
715/** Function table for the IMUL instruction. */
716IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
717{
718 NULL, NULL,
719 iemAImpl_imul_two_u16, NULL,
720 iemAImpl_imul_two_u32, NULL,
721 iemAImpl_imul_two_u64, NULL
722};
723
724/** Group 1 /r lookup table. */
725IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
726{
727 &g_iemAImpl_add,
728 &g_iemAImpl_or,
729 &g_iemAImpl_adc,
730 &g_iemAImpl_sbb,
731 &g_iemAImpl_and,
732 &g_iemAImpl_sub,
733 &g_iemAImpl_xor,
734 &g_iemAImpl_cmp
735};
736
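/*
 * Illustrative sketch (hypothetical helper): group 1 instructions (0x80..0x83)
 * pick their operation from this table using the ModR/M reg field (bits 3-5),
 * i.e. /0=ADD, /1=OR, /2=ADC, /3=SBB, /4=AND, /5=SUB, /6=XOR, /7=CMP.
 */
#if 0
static PCIEMOPBINSIZES iemExampleLookupGrp1(uint8_t bRm)
{
    return g_apIemImplGrp1[(bRm >> 3) & 7]; /* ModR/M reg field selects the operation */
}
#endif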
737/** Function table for the INC instruction. */
738IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
739{
740 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
741 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
742 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
743 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
744};
745
746/** Function table for the DEC instruction. */
747IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
748{
749 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
750 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
751 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
752 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
753};
754
755/** Function table for the NEG instruction. */
756IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
757{
758 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
759 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
760 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
761 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
762};
763
764/** Function table for the NOT instruction. */
765IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
766{
767 iemAImpl_not_u8, iemAImpl_not_u8_locked,
768 iemAImpl_not_u16, iemAImpl_not_u16_locked,
769 iemAImpl_not_u32, iemAImpl_not_u32_locked,
770 iemAImpl_not_u64, iemAImpl_not_u64_locked
771};
772
773
774/** Function table for the ROL instruction. */
775IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
776{
777 iemAImpl_rol_u8,
778 iemAImpl_rol_u16,
779 iemAImpl_rol_u32,
780 iemAImpl_rol_u64
781};
782
783/** Function table for the ROR instruction. */
784IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
785{
786 iemAImpl_ror_u8,
787 iemAImpl_ror_u16,
788 iemAImpl_ror_u32,
789 iemAImpl_ror_u64
790};
791
792/** Function table for the RCL instruction. */
793IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
794{
795 iemAImpl_rcl_u8,
796 iemAImpl_rcl_u16,
797 iemAImpl_rcl_u32,
798 iemAImpl_rcl_u64
799};
800
801/** Function table for the RCR instruction. */
802IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
803{
804 iemAImpl_rcr_u8,
805 iemAImpl_rcr_u16,
806 iemAImpl_rcr_u32,
807 iemAImpl_rcr_u64
808};
809
810/** Function table for the SHL instruction. */
811IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
812{
813 iemAImpl_shl_u8,
814 iemAImpl_shl_u16,
815 iemAImpl_shl_u32,
816 iemAImpl_shl_u64
817};
818
819/** Function table for the SHR instruction. */
820IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
821{
822 iemAImpl_shr_u8,
823 iemAImpl_shr_u16,
824 iemAImpl_shr_u32,
825 iemAImpl_shr_u64
826};
827
828/** Function table for the SAR instruction. */
829IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
830{
831 iemAImpl_sar_u8,
832 iemAImpl_sar_u16,
833 iemAImpl_sar_u32,
834 iemAImpl_sar_u64
835};
836
837
838/** Function table for the MUL instruction. */
839IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
840{
841 iemAImpl_mul_u8,
842 iemAImpl_mul_u16,
843 iemAImpl_mul_u32,
844 iemAImpl_mul_u64
845};
846
847/** Function table for the IMUL instruction working implicitly on rAX. */
848IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
849{
850 iemAImpl_imul_u8,
851 iemAImpl_imul_u16,
852 iemAImpl_imul_u32,
853 iemAImpl_imul_u64
854};
855
856/** Function table for the DIV instruction. */
857IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
858{
859 iemAImpl_div_u8,
860 iemAImpl_div_u16,
861 iemAImpl_div_u32,
862 iemAImpl_div_u64
863};
864
865/** Function table for the IDIV instruction. */
866IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
867{
868 iemAImpl_idiv_u8,
869 iemAImpl_idiv_u16,
870 iemAImpl_idiv_u32,
871 iemAImpl_idiv_u64
872};
873
874/** Function table for the SHLD instruction */
875IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
876{
877 iemAImpl_shld_u16,
878 iemAImpl_shld_u32,
879 iemAImpl_shld_u64,
880};
881
882/** Function table for the SHRD instruction */
883IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
884{
885 iemAImpl_shrd_u16,
886 iemAImpl_shrd_u32,
887 iemAImpl_shrd_u64,
888};
889
890
891/** Function table for the PUNPCKLBW instruction */
892IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
893/** Function table for the PUNPCKLWD instruction */
894IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
895/** Function table for the PUNPCKLDQ instruction */
896IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
897/** Function table for the PUNPCKLQDQ instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
899
900/** Function table for the PUNPCKHBW instruction */
901IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
902/** Function table for the PUNPCKHWD instruction */
903IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
904/** Function table for the PUNPCKHDQ instruction */
905IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
906/** Function table for the PUNPCKHQDQ instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
908
909/** Function table for the PXOR instruction */
910IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
911/** Function table for the PCMPEQB instruction */
912IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
913/** Function table for the PCMPEQW instruction */
914IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
915/** Function table for the PCMPEQD instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
917
918
919#if defined(IEM_LOG_MEMORY_WRITES)
920/** What IEM just wrote. */
921uint8_t g_abIemWrote[256];
922/** How much IEM just wrote. */
923size_t g_cbIemWrote;
924#endif
925
926
927/*********************************************************************************************************************************
928* Internal Functions *
929*********************************************************************************************************************************/
930IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr);
931IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu);
932IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu);
933IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel);
934/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
935IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
936IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
938IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
939IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr);
940IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu);
941IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
943IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel);
944IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
945IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
946IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu);
947#ifdef IEM_WITH_SETJMP
948DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
949DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu);
950DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
951DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel);
952DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
953#endif
954
955IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
956IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess);
957IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
958IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
959IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
960IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
961IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
962IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
963IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
966IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp);
967IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
968IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value);
969IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value);
970IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel);
971DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg);
972DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg);
973
974#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
975IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
976IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
977IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
978IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu);
979IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
980IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value);
981IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value);
982#endif
983
984#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
985IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
986IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
987#endif
988
989
990/**
991 * Sets the pass up status.
992 *
993 * @returns VINF_SUCCESS.
994 * @param pVCpu The cross context virtual CPU structure of the
995 * calling thread.
996 * @param rcPassUp The pass up status. Must be informational.
997 * VINF_SUCCESS is not allowed.
998 */
999IEM_STATIC int iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp)
1000{
1001 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1002
1003 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1004 if (rcOldPassUp == VINF_SUCCESS)
1005 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1006 /* If both are EM scheduling codes, use EM priority rules. */
1007 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1008 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1009 {
1010 if (rcPassUp < rcOldPassUp)
1011 {
1012 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1013 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1014 }
1015 else
1016 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1017 }
1018 /* Override EM scheduling with specific status code. */
1019 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1020 {
1021 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1022 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1023 }
1024 /* Don't override specific status code, first come first served. */
1025 else
1026 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1027 return VINF_SUCCESS;
1028}
1029
1030
1031/**
1032 * Calculates the CPU mode.
1033 *
1034 * This is mainly for updating IEMCPU::enmCpuMode.
1035 *
1036 * @returns CPU mode.
1037 * @param pVCpu The cross context virtual CPU structure of the
1038 * calling thread.
1039 */
1040DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu)
1041{
1042 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1043 return IEMMODE_64BIT;
1044 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1045 return IEMMODE_32BIT;
1046 return IEMMODE_16BIT;
1047}
1048
1049
1050/**
1051 * Initializes the execution state.
1052 *
1053 * @param pVCpu The cross context virtual CPU structure of the
1054 * calling thread.
1055 * @param fBypassHandlers Whether to bypass access handlers.
1056 *
1057 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1058 * side-effects in strict builds.
1059 */
1060DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers)
1061{
1062 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1063 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1064 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1065 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1066 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1067 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1068 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1069 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1070 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1072
1073 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1074 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1075#ifdef VBOX_STRICT
1076 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1077 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1078 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1079 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1080 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1081 pVCpu->iem.s.uRexReg = 127;
1082 pVCpu->iem.s.uRexB = 127;
1083 pVCpu->iem.s.offModRm = 127;
1084 pVCpu->iem.s.uRexIndex = 127;
1085 pVCpu->iem.s.iEffSeg = 127;
1086 pVCpu->iem.s.idxPrefix = 127;
1087 pVCpu->iem.s.uVex3rdReg = 127;
1088 pVCpu->iem.s.uVexLength = 127;
1089 pVCpu->iem.s.fEvexStuff = 127;
1090 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1091# ifdef IEM_WITH_CODE_TLB
1092 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1093 pVCpu->iem.s.pbInstrBuf = NULL;
1094 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1095 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1096 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1097 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1098# else
1099 pVCpu->iem.s.offOpcode = 127;
1100 pVCpu->iem.s.cbOpcode = 127;
1101# endif
1102#endif
1103
1104 pVCpu->iem.s.cActiveMappings = 0;
1105 pVCpu->iem.s.iNextMapping = 0;
1106 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1107 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1108#if 0
1109#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1110 if ( CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
1111 && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
1112 {
1113 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1114 Assert(pVmcs);
1115 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
1116 if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
1117 {
1118 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
1119 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
1120 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
1121 AssertRC(rc);
1122 }
1123 }
1124#endif
1125#endif
1126}
1127
1128#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1129/**
1130 * Performs a minimal reinitialization of the execution state.
1131 *
1132 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1133 * 'world-switch' type operations on the CPU. Currently only nested
1134 * hardware-virtualization uses it.
1135 *
1136 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1137 */
1138IEM_STATIC void iemReInitExec(PVMCPUCC pVCpu)
1139{
1140 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1141 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1142
1143 pVCpu->iem.s.uCpl = uCpl;
1144 pVCpu->iem.s.enmCpuMode = enmMode;
1145 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1146 pVCpu->iem.s.enmEffAddrMode = enmMode;
1147 if (enmMode != IEMMODE_64BIT)
1148 {
1149 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1150 pVCpu->iem.s.enmEffOpSize = enmMode;
1151 }
1152 else
1153 {
1154 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1155 pVCpu->iem.s.enmEffOpSize = enmMode;
1156 }
1157 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1158#ifndef IEM_WITH_CODE_TLB
1159 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1160 pVCpu->iem.s.offOpcode = 0;
1161 pVCpu->iem.s.cbOpcode = 0;
1162#endif
1163 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1164}
1165#endif
1166
1167/**
1168 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1169 *
1170 * @param pVCpu The cross context virtual CPU structure of the
1171 * calling thread.
1172 */
1173DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu)
1174{
1175 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1176#ifdef VBOX_STRICT
1177# ifdef IEM_WITH_CODE_TLB
1178 NOREF(pVCpu);
1179# else
1180 pVCpu->iem.s.cbOpcode = 0;
1181# endif
1182#else
1183 NOREF(pVCpu);
1184#endif
1185}
1186
1187
1188/**
1189 * Initializes the decoder state.
1190 *
1191 * iemReInitDecoder is mostly a copy of this function.
1192 *
1193 * @param pVCpu The cross context virtual CPU structure of the
1194 * calling thread.
1195 * @param fBypassHandlers Whether to bypass access handlers.
1196 * @param fDisregardLock Whether to disregard the LOCK prefix.
1197 */
1198DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1199{
1200 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1201 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1209 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1210
1211 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1212 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1213 pVCpu->iem.s.enmCpuMode = enmMode;
1214 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1215 pVCpu->iem.s.enmEffAddrMode = enmMode;
1216 if (enmMode != IEMMODE_64BIT)
1217 {
1218 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1219 pVCpu->iem.s.enmEffOpSize = enmMode;
1220 }
1221 else
1222 {
1223 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1224 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1225 }
1226 pVCpu->iem.s.fPrefixes = 0;
1227 pVCpu->iem.s.uRexReg = 0;
1228 pVCpu->iem.s.uRexB = 0;
1229 pVCpu->iem.s.uRexIndex = 0;
1230 pVCpu->iem.s.idxPrefix = 0;
1231 pVCpu->iem.s.uVex3rdReg = 0;
1232 pVCpu->iem.s.uVexLength = 0;
1233 pVCpu->iem.s.fEvexStuff = 0;
1234 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1235#ifdef IEM_WITH_CODE_TLB
1236 pVCpu->iem.s.pbInstrBuf = NULL;
1237 pVCpu->iem.s.offInstrNextByte = 0;
1238 pVCpu->iem.s.offCurInstrStart = 0;
1239# ifdef VBOX_STRICT
1240 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1241 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1242 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1243# endif
1244#else
1245 pVCpu->iem.s.offOpcode = 0;
1246 pVCpu->iem.s.cbOpcode = 0;
1247#endif
1248 pVCpu->iem.s.offModRm = 0;
1249 pVCpu->iem.s.cActiveMappings = 0;
1250 pVCpu->iem.s.iNextMapping = 0;
1251 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1252 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1253 pVCpu->iem.s.fDisregardLock = fDisregardLock;
1254
1255#ifdef DBGFTRACE_ENABLED
1256 switch (enmMode)
1257 {
1258 case IEMMODE_64BIT:
1259 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1260 break;
1261 case IEMMODE_32BIT:
1262 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1263 break;
1264 case IEMMODE_16BIT:
1265 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1266 break;
1267 }
1268#endif
1269}
1270
1271
1272/**
1273 * Reinitializes the decoder state 2nd+ loop of IEMExecLots.
1274 *
1275 * This is mostly a copy of iemInitDecoder.
1276 *
1277 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1278 */
1279DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
1280{
1281 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1282 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1283 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1284 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1285 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1286 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1287 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1288 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1289 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1290
1291 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1292 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1293 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1294 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1295 pVCpu->iem.s.enmEffAddrMode = enmMode;
1296 if (enmMode != IEMMODE_64BIT)
1297 {
1298 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1299 pVCpu->iem.s.enmEffOpSize = enmMode;
1300 }
1301 else
1302 {
1303 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1304 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1305 }
1306 pVCpu->iem.s.fPrefixes = 0;
1307 pVCpu->iem.s.uRexReg = 0;
1308 pVCpu->iem.s.uRexB = 0;
1309 pVCpu->iem.s.uRexIndex = 0;
1310 pVCpu->iem.s.idxPrefix = 0;
1311 pVCpu->iem.s.uVex3rdReg = 0;
1312 pVCpu->iem.s.uVexLength = 0;
1313 pVCpu->iem.s.fEvexStuff = 0;
1314 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1315#ifdef IEM_WITH_CODE_TLB
1316 if (pVCpu->iem.s.pbInstrBuf)
1317 {
1318 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1319 - pVCpu->iem.s.uInstrBufPc;
1320 if (off < pVCpu->iem.s.cbInstrBufTotal)
1321 {
1322 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1323 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1324 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1325 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1326 else
1327 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1328 }
1329 else
1330 {
1331 pVCpu->iem.s.pbInstrBuf = NULL;
1332 pVCpu->iem.s.offInstrNextByte = 0;
1333 pVCpu->iem.s.offCurInstrStart = 0;
1334 pVCpu->iem.s.cbInstrBuf = 0;
1335 pVCpu->iem.s.cbInstrBufTotal = 0;
1336 }
1337 }
1338 else
1339 {
1340 pVCpu->iem.s.offInstrNextByte = 0;
1341 pVCpu->iem.s.offCurInstrStart = 0;
1342 pVCpu->iem.s.cbInstrBuf = 0;
1343 pVCpu->iem.s.cbInstrBufTotal = 0;
1344 }
1345#else
1346 pVCpu->iem.s.cbOpcode = 0;
1347 pVCpu->iem.s.offOpcode = 0;
1348#endif
1349 pVCpu->iem.s.offModRm = 0;
1350 Assert(pVCpu->iem.s.cActiveMappings == 0);
1351 pVCpu->iem.s.iNextMapping = 0;
1352 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1353 Assert(pVCpu->iem.s.fBypassHandlers == false);
1354
1355#ifdef DBGFTRACE_ENABLED
1356 switch (enmMode)
1357 {
1358 case IEMMODE_64BIT:
1359 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1360 break;
1361 case IEMMODE_32BIT:
1362 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1363 break;
1364 case IEMMODE_16BIT:
1365 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1366 break;
1367 }
1368#endif
1369}
1370
1371
1372
1373/**
1374 * Prefetches opcodes when execution is first started.
1375 *
1376 * @returns Strict VBox status code.
1377 * @param pVCpu The cross context virtual CPU structure of the
1378 * calling thread.
1379 * @param fBypassHandlers Whether to bypass access handlers.
1380 * @param fDisregardLock Whether to disregard LOCK prefixes.
1381 *
1382 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
1383 * store them as such.
1384 */
1385IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1386{
1387 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
1388
1389#ifdef IEM_WITH_CODE_TLB
1390 /** @todo Do ITLB lookup here. */
1391
1392#else /* !IEM_WITH_CODE_TLB */
1393
1394 /*
1395 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1396 *
1397 * First translate CS:rIP to a physical address.
1398 */
1399 uint32_t cbToTryRead;
1400 RTGCPTR GCPtrPC;
1401 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1402 {
1403 cbToTryRead = PAGE_SIZE;
1404 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1405 if (IEM_IS_CANONICAL(GCPtrPC))
1406 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1407 else
1408 return iemRaiseGeneralProtectionFault0(pVCpu);
1409 }
1410 else
1411 {
1412 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1413 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1414 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1415 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1416 else
1417 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1418 if (cbToTryRead) { /* likely */ }
1419 else /* overflowed */
1420 {
1421 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1422 cbToTryRead = UINT32_MAX;
1423 }
1424 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1425 Assert(GCPtrPC <= UINT32_MAX);
1426 }
1427
1428 RTGCPHYS GCPhys;
1429 uint64_t fFlags;
1430 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1431 if (RT_SUCCESS(rc)) { /* probable */ }
1432 else
1433 {
1434 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1435 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1436 }
1437 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1438 else
1439 {
1440 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1441 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1442 }
1443 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1444 else
1445 {
1446 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1447 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1448 }
1449 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1450 /** @todo Check reserved bits and such stuff. PGM is better at doing
1451 * that, so do it when implementing the guest virtual address
1452 * TLB... */
1453
1454 /*
1455 * Read the bytes at this address.
1456 */
1457 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1458 if (cbToTryRead > cbLeftOnPage)
1459 cbToTryRead = cbLeftOnPage;
1460 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1461 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1462
1463 if (!pVCpu->iem.s.fBypassHandlers)
1464 {
1465 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1466 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1467 { /* likely */ }
1468 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1469 {
1470 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1471                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1472 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1473 }
1474 else
1475 {
1476 Log((RT_SUCCESS(rcStrict)
1477 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1478 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1479                  GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1480 return rcStrict;
1481 }
1482 }
1483 else
1484 {
1485 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1486 if (RT_SUCCESS(rc))
1487 { /* likely */ }
1488 else
1489 {
1490 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1491                  GCPtrPC, GCPhys, cbToTryRead, rc));
1492 return rc;
1493 }
1494 }
1495 pVCpu->iem.s.cbOpcode = cbToTryRead;
1496#endif /* !IEM_WITH_CODE_TLB */
1497 return VINF_SUCCESS;
1498}
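/*
 * The initial prefetch above never reads across a page boundary and never
 * reads more than the opcode buffer can hold.  The following is a minimal,
 * self-contained sketch of that clamping step; EXAMPLE_PAGE_SIZE and
 * EXAMPLE_OPCODE_BUF_MAX are illustrative stand-ins for PAGE_SIZE and the
 * opcode buffer size, not the real constants.
 */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>

# define EXAMPLE_PAGE_SIZE       0x1000u
# define EXAMPLE_PAGE_OFF_MASK   (EXAMPLE_PAGE_SIZE - 1u)
# define EXAMPLE_OPCODE_BUF_MAX  16u

/* Clamp an initial opcode read so it neither crosses the page boundary nor
   overflows the opcode buffer. */
static uint32_t exampleClampPrefetch(uint64_t uLinearPc, uint32_t cbWanted)
{
    uint32_t const cbLeftOnPage = EXAMPLE_PAGE_SIZE - (uint32_t)(uLinearPc & EXAMPLE_PAGE_OFF_MASK);
    if (cbWanted > cbLeftOnPage)
        cbWanted = cbLeftOnPage;
    if (cbWanted > EXAMPLE_OPCODE_BUF_MAX)
        cbWanted = EXAMPLE_OPCODE_BUF_MAX;
    return cbWanted;
}
#endif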
1499
1500
1501/**
1502 * Invalidates the IEM TLBs.
1503 *
1504 * This is called internally as well as by PGM when moving GC mappings.
1505 *
1507 * @param pVCpu The cross context virtual CPU structure of the calling
1508 * thread.
1509 * @param fVmm Set when PGM calls us with a remapping.
1510 */
1511VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu, bool fVmm)
1512{
1513#ifdef IEM_WITH_CODE_TLB
1514 pVCpu->iem.s.cbInstrBufTotal = 0;
1515 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1516 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1517 { /* very likely */ }
1518 else
1519 {
1520 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1521 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1522 while (i-- > 0)
1523 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1524 }
1525#endif
1526
1527#ifdef IEM_WITH_DATA_TLB
1528 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1529 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1530 { /* very likely */ }
1531 else
1532 {
1533 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1534 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1535 while (i-- > 0)
1536 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1537 }
1538#endif
1539 NOREF(pVCpu); NOREF(fVmm);
1540}
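/*
 * IEMTlbInvalidateAll flushes a whole TLB in O(1) by bumping the revision
 * that is OR'ed into every tag; existing entries simply stop matching.  Only
 * on the rare revision wrap-around is the tag array actually cleared.  The
 * sketch below is a simplified, self-contained model of that scheme; the
 * EXAMPLETLB type and the EXAMPLE_TLB_REV_INCR placement are assumptions
 * made for illustration, not the real IEM layout.
 */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>
# include <stddef.h>

# define EXAMPLE_TLB_ENTRIES  256u
# define EXAMPLE_TLB_REV_INCR UINT64_C(0x0100000000000000) /* revision assumed to live above the page number bits */

typedef struct EXAMPLETLB
{
    uint64_t uRevision;                     /* current revision, OR'ed into every valid tag */
    uint64_t auTags[EXAMPLE_TLB_ENTRIES];   /* tag = (page number | revision); 0 = free */
} EXAMPLETLB;

static void exampleTlbInvalidateAll(EXAMPLETLB *pTlb)
{
    pTlb->uRevision += EXAMPLE_TLB_REV_INCR;
    if (pTlb->uRevision != 0)
    { /* very likely: old tags no longer match the new revision */ }
    else
    {
        /* Wrap-around: restart the revision and wipe the stale tags for real. */
        pTlb->uRevision = EXAMPLE_TLB_REV_INCR;
        for (size_t i = 0; i < EXAMPLE_TLB_ENTRIES; i++)
            pTlb->auTags[i] = 0;
    }
}
#endif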
1541
1542
1543/**
1544 * Invalidates a page in the TLBs.
1545 *
1546 * @param pVCpu The cross context virtual CPU structure of the calling
1547 * thread.
1548 * @param       GCPtr       The address of the page to invalidate.
1549 */
1550VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1551{
1552#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1553 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1554 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1555 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1556 uintptr_t idx = (uint8_t)GCPtr;
1557
1558# ifdef IEM_WITH_CODE_TLB
1559 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1560 {
1561 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1562 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1563 pVCpu->iem.s.cbInstrBufTotal = 0;
1564 }
1565# endif
1566
1567# ifdef IEM_WITH_DATA_TLB
1568 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1569 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1570# endif
1571#else
1572 NOREF(pVCpu); NOREF(GCPtr);
1573#endif
1574}
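/*
 * The TLBs above are direct mapped: the low 8 bits of the page number select
 * the entry and the full (page number | revision) value is the tag.  Reusing
 * the EXAMPLETLB model from the sketch above, invalidating a single page
 * looks roughly like this (the 12-bit shift is the usual x86 4K page size):
 */
#if 0 /* illustrative sketch, not built */
static void exampleTlbInvalidatePage(EXAMPLETLB *pTlb, uint64_t uPtr)
{
    uint64_t const uPageNo = uPtr >> 12;            /* linear address -> page number */
    size_t   const idx     = (uint8_t)uPageNo;      /* 256 entries -> low 8 bits as index */
    if (pTlb->auTags[idx] == (uPageNo | pTlb->uRevision))
        pTlb->auTags[idx] = 0;                      /* only a hit on the current revision counts */
}
#endif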
1575
1576
1577/**
1578 * Invalidates the host physical aspects of the IEM TLBs.
1579 *
1580 * This is called internally as well as by PGM when moving GC mappings.
1581 *
1582 * @param pVCpu The cross context virtual CPU structure of the calling
1583 * thread.
1584 */
1585VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
1586{
1587#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1588    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1589
1590# ifdef IEM_WITH_CODE_TLB
1591 pVCpu->iem.s.cbInstrBufTotal = 0;
1592# endif
1593 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1594 if (uTlbPhysRev != 0)
1595 {
1596 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1597 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1598 }
1599 else
1600 {
1601 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1602 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1603
1604 unsigned i;
1605# ifdef IEM_WITH_CODE_TLB
1606 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1607 while (i-- > 0)
1608 {
1609 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1610 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1611 }
1612# endif
1613# ifdef IEM_WITH_DATA_TLB
1614 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1615 while (i-- > 0)
1616 {
1617 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1618 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1619 }
1620# endif
1621 }
1622#else
1623 NOREF(pVCpu);
1624#endif
1625}
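/*
 * The physical side of an entry (host mapping pointer and the PG_NO_READ /
 * PG_NO_WRITE bits) is revalidated lazily: a physical revision stored in the
 * upper bits of fFlagsAndPhysRev must equal the TLB's current uTlbPhysRev
 * before the cached mapping may be used.  A minimal sketch of that check,
 * with an assumed bit split (EXAMPLE_PHYS_REV_MASK is not the real
 * IEMTLBE_F_PHYS_REV value):
 */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>
# include <stdbool.h>

# define EXAMPLE_PHYS_REV_MASK UINT64_C(0xffffffffffff0000) /* assumed: low bits = flags, high bits = phys rev */

typedef struct EXAMPLETLBENTRYPHYS
{
    uint64_t fFlagsAndPhysRev;  /* permission flags + physical revision */
    uint8_t *pbMappingR3;       /* cached host mapping; stale once the revision moves on */
} EXAMPLETLBENTRYPHYS;

static bool exampleIsPhysInfoCurrent(EXAMPLETLBENTRYPHYS const *pEntry, uint64_t uTlbPhysRev)
{
    return (pEntry->fFlagsAndPhysRev & EXAMPLE_PHYS_REV_MASK) == uTlbPhysRev;
}
#endif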
1626
1627
1628/**
1629 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1630 *
1631 * This is called internally as well as by PGM when moving GC mappings.
1632 *
1633 * @param pVM The cross context VM structure.
1634 *
1635 * @remarks Caller holds the PGM lock.
1636 */
1637VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1638{
1639 RT_NOREF_PV(pVM);
1640}
1641
1642#ifdef IEM_WITH_CODE_TLB
1643
1644/**
1645 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1646 * failure and longjmp'ing.
1647 *
1648 * We end up here for a number of reasons:
1649 * - pbInstrBuf isn't yet initialized.
1650 * - Advancing beyond the buffer boundary (e.g. cross page).
1651 * - Advancing beyond the CS segment limit.
1652 * - Fetching from non-mappable page (e.g. MMIO).
1653 *
1654 * @param pVCpu The cross context virtual CPU structure of the
1655 * calling thread.
1656 * @param pVCpu The cross context virtual CPU structure of the
1657 * calling thread.
1658 * @param cbDst Number of bytes to read.
1659 * @param pvDst Where to return the bytes.
1658 *
1659 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1660 */
1661IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst)
1662{
1663#ifdef IN_RING3
1664 for (;;)
1665 {
1666 Assert(cbDst <= 8);
1667 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1668
1669 /*
1670 * We might have a partial buffer match, deal with that first to make the
1671 * rest simpler. This is the first part of the cross page/buffer case.
1672 */
1673 if (pVCpu->iem.s.pbInstrBuf != NULL)
1674 {
1675 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1676 {
1677 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1678 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1679 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1680
1681 cbDst -= cbCopy;
1682 pvDst = (uint8_t *)pvDst + cbCopy;
1683 offBuf += cbCopy;
1684 pVCpu->iem.s.offInstrNextByte += offBuf;
1685 }
1686 }
1687
1688 /*
1689 * Check segment limit, figuring how much we're allowed to access at this point.
1690 *
1691 * We will fault immediately if RIP is past the segment limit / in non-canonical
1692 * territory. If we do continue, there are one or more bytes to read before we
1693 * end up in trouble and we need to do that first before faulting.
1694 */
1695 RTGCPTR GCPtrFirst;
1696 uint32_t cbMaxRead;
1697 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1698 {
1699 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1700 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1701 { /* likely */ }
1702 else
1703 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1704 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1705 }
1706 else
1707 {
1708 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1709 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1710 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1711 { /* likely */ }
1712 else
1713 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1714 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1715 if (cbMaxRead != 0)
1716 { /* likely */ }
1717 else
1718 {
1719 /* Overflowed because address is 0 and limit is max. */
1720 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1721 cbMaxRead = X86_PAGE_SIZE;
1722 }
1723 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1724 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1725 if (cbMaxRead2 < cbMaxRead)
1726 cbMaxRead = cbMaxRead2;
1727 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1728 }
1729
1730 /*
1731 * Get the TLB entry for this piece of code.
1732 */
1733 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1734 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1735 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1736 if (pTlbe->uTag == uTag)
1737 {
1738 /* likely when executing lots of code, otherwise unlikely */
1739# ifdef VBOX_WITH_STATISTICS
1740 pVCpu->iem.s.CodeTlb.cTlbHits++;
1741# endif
1742 }
1743 else
1744 {
1745 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1746 RTGCPHYS GCPhys;
1747 uint64_t fFlags;
1748 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1749 if (RT_FAILURE(rc))
1750 {
1751 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1752 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1753 }
1754
1755 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1756 pTlbe->uTag = uTag;
1757 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1758 pTlbe->GCPhys = GCPhys;
1759 pTlbe->pbMappingR3 = NULL;
1760 }
1761
1762 /*
1763 * Check TLB page table level access flags.
1764 */
1765 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1766 {
1767 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1768 {
1769 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1770 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1771 }
1772 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1773 {
1774 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1775 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1776 }
1777 }
1778
1779 /*
1780 * Look up the physical page info if necessary.
1781 */
1782 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1783 { /* not necessary */ }
1784 else
1785 {
1786 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1787 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1788 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1789 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1790 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1791 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1792 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1793 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1794 }
1795
1796# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1797 /*
1798 * Try do a direct read using the pbMappingR3 pointer.
1799 */
1800 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1801 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1802 {
1803 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1804 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1805 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1806 {
1807 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1808 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1809 }
1810 else
1811 {
1812 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1813 Assert(cbInstr < cbMaxRead);
1814 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1815 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1816 }
1817 if (cbDst <= cbMaxRead)
1818 {
1819 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1820 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1821 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1822 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1823 return;
1824 }
1825 pVCpu->iem.s.pbInstrBuf = NULL;
1826
1827 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1828 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1829 }
1830 else
1831# endif
1832#if 0
1833 /*
1834 * If there is no special read handling, we can read a bit more and
1835 * put it in the prefetch buffer.
1836 */
1837 if ( cbDst < cbMaxRead
1838 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1839 {
1840 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1841 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1842 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1843 { /* likely */ }
1844 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1845 {
1846 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1847 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1848 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1849             AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1850 }
1851 else
1852 {
1853 Log((RT_SUCCESS(rcStrict)
1854 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1855 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1856 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1857 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1858 }
1859 }
1860 /*
1861 * Special read handling, so only read exactly what's needed.
1862 * This is a highly unlikely scenario.
1863 */
1864 else
1865#endif
1866 {
1867 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1868 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1869 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1870 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1871 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1872 { /* likely */ }
1873 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1874 {
1875 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1876                  GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1877 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1878 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1879 }
1880 else
1881 {
1882 Log((RT_SUCCESS(rcStrict)
1883 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1884 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1885                  GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1886 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1887 }
1888 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1889 if (cbToRead == cbDst)
1890 return;
1891 }
1892
1893 /*
1894 * More to read, loop.
1895 */
1896 cbDst -= cbMaxRead;
1897 pvDst = (uint8_t *)pvDst + cbMaxRead;
1898 }
1899#else
1900 RT_NOREF(pvDst, cbDst);
1901 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1902#endif
1903}
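/*
 * The loop above delivers cbDst bytes even when the request straddles the
 * current instruction buffer: whatever is still buffered is copied first,
 * then the next page is looked up and mapped and the loop goes around again.
 * A stripped-down, self-contained model of that pattern; exampleRefill() is
 * a hypothetical stand-in for the TLB lookup and mapping work done above.
 */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>
# include <string.h>

typedef struct EXAMPLEFETCHSTATE
{
    uint8_t const *pbBuf;   /* currently mapped chunk of guest code */
    uint32_t       offNext; /* read cursor within pbBuf */
    uint32_t       cbBuf;   /* number of valid bytes in pbBuf */
} EXAMPLEFETCHSTATE;

/* Hypothetical: maps the next chunk and resets offNext/cbBuf accordingly. */
void exampleRefill(EXAMPLEFETCHSTATE *pState);

static void exampleFetchBytes(EXAMPLEFETCHSTATE *pState, void *pvDst, size_t cbDst)
{
    for (;;)
    {
        uint32_t const cbLeft = pState->cbBuf - pState->offNext;
        uint32_t const cbCopy = cbDst <= cbLeft ? (uint32_t)cbDst : cbLeft;
        memcpy(pvDst, &pState->pbBuf[pState->offNext], cbCopy);
        pState->offNext += cbCopy;
        cbDst           -= cbCopy;
        if (!cbDst)
            return;                                 /* request satisfied */
        pvDst = (uint8_t *)pvDst + cbCopy;
        exampleRefill(pState);                      /* cross into the next page/buffer */
    }
}
#endif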
1904
1905#else
1906
1907/**
1908 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1909 * exception if it fails.
1910 *
1911 * @returns Strict VBox status code.
1912 * @param pVCpu The cross context virtual CPU structure of the
1913 * calling thread.
1914 * @param cbMin The minimum number of bytes, relative to offOpcode,
1915 * that must be read.
1916 */
1917IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin)
1918{
1919 /*
1920 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1921 *
1922 * First translate CS:rIP to a physical address.
1923 */
1924 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1925 uint32_t cbToTryRead;
1926 RTGCPTR GCPtrNext;
1927 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1928 {
1929 cbToTryRead = PAGE_SIZE;
1930 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1931 if (!IEM_IS_CANONICAL(GCPtrNext))
1932 return iemRaiseGeneralProtectionFault0(pVCpu);
1933 }
1934 else
1935 {
1936 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1937 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1938 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1939 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1940 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1941 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1942 if (!cbToTryRead) /* overflowed */
1943 {
1944 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1945 cbToTryRead = UINT32_MAX;
1946 /** @todo check out wrapping around the code segment. */
1947 }
1948 if (cbToTryRead < cbMin - cbLeft)
1949 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1950 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1951 }
1952
1953 /* Only read up to the end of the page, and make sure we don't read more
1954 than the opcode buffer can hold. */
1955 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1956 if (cbToTryRead > cbLeftOnPage)
1957 cbToTryRead = cbLeftOnPage;
1958 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1959 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1960/** @todo r=bird: Convert assertion into undefined opcode exception? */
1961 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1962
1963 RTGCPHYS GCPhys;
1964 uint64_t fFlags;
1965 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1966 if (RT_FAILURE(rc))
1967 {
1968 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1969 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1970 }
1971 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1972 {
1973 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1974 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1975 }
1976 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1977 {
1978 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1979 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1980 }
1981 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1982 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1983 /** @todo Check reserved bits and such stuff. PGM is better at doing
1984 * that, so do it when implementing the guest virtual address
1985 * TLB... */
1986
1987 /*
1988 * Read the bytes at this address.
1989 *
1990 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1991 * and since PATM should only patch the start of an instruction there
1992 * should be no need to check again here.
1993 */
1994 if (!pVCpu->iem.s.fBypassHandlers)
1995 {
1996 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1997 cbToTryRead, PGMACCESSORIGIN_IEM);
1998 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1999 { /* likely */ }
2000 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2001 {
2002 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2003                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2004 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2005 }
2006 else
2007 {
2008 Log((RT_SUCCESS(rcStrict)
2009 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2010 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2011                  GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2012 return rcStrict;
2013 }
2014 }
2015 else
2016 {
2017 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2018 if (RT_SUCCESS(rc))
2019 { /* likely */ }
2020 else
2021 {
2022 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2023 return rc;
2024 }
2025 }
2026 pVCpu->iem.s.cbOpcode += cbToTryRead;
2027 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2028
2029 return VINF_SUCCESS;
2030}
2031
2032#endif /* !IEM_WITH_CODE_TLB */
2033#ifndef IEM_WITH_SETJMP
2034
2035/**
2036 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2037 *
2038 * @returns Strict VBox status code.
2039 * @param pVCpu The cross context virtual CPU structure of the
2040 * calling thread.
2041 * @param pb Where to return the opcode byte.
2042 */
2043DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb)
2044{
2045 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2046 if (rcStrict == VINF_SUCCESS)
2047 {
2048 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2049 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2050 pVCpu->iem.s.offOpcode = offOpcode + 1;
2051 }
2052 else
2053 *pb = 0;
2054 return rcStrict;
2055}
2056
2057
2058/**
2059 * Fetches the next opcode byte.
2060 *
2061 * @returns Strict VBox status code.
2062 * @param pVCpu The cross context virtual CPU structure of the
2063 * calling thread.
2064 * @param pu8 Where to return the opcode byte.
2065 */
2066DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
2067{
2068 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2069 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2070 {
2071 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2072 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2073 return VINF_SUCCESS;
2074 }
2075 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2076}
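/*
 * iemOpcodeGetNextU8 shows the fast-path/slow-path split used throughout
 * this file: the forced-inline function only handles the "byte is already
 * buffered" case, everything else goes to an out-of-line Slow worker so the
 * hot decode loop stays small.  A minimal standalone model of that split
 * (the Example* names are hypothetical):
 */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>

typedef struct EXAMPLESTREAM
{
    uint8_t const *pb;      /* buffered opcode bytes */
    uint32_t       off;     /* current read offset */
    uint32_t       cb;      /* number of buffered bytes */
} EXAMPLESTREAM;

uint8_t exampleGetByteSlow(EXAMPLESTREAM *pStream);     /* hypothetical out-of-line refill + retry */

static inline uint8_t exampleGetByte(EXAMPLESTREAM *pStream)
{
    if (pStream->off < pStream->cb)                     /* likely: the byte is already buffered */
        return pStream->pb[pStream->off++];
    return exampleGetByteSlow(pStream);                 /* unlikely: take the slow route */
}
#endif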
2077
2078#else /* IEM_WITH_SETJMP */
2079
2080/**
2081 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2082 *
2083 * @returns The opcode byte.
2084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2085 */
2086DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu)
2087{
2088# ifdef IEM_WITH_CODE_TLB
2089 uint8_t u8;
2090 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2091 return u8;
2092# else
2093 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2094 if (rcStrict == VINF_SUCCESS)
2095 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2096 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2097# endif
2098}
2099
2100
2101/**
2102 * Fetches the next opcode byte, longjmp on error.
2103 *
2104 * @returns The opcode byte.
2105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2106 */
2107DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu)
2108{
2109# ifdef IEM_WITH_CODE_TLB
2110 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2111 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2112 if (RT_LIKELY( pbBuf != NULL
2113 && offBuf < pVCpu->iem.s.cbInstrBuf))
2114 {
2115 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2116 return pbBuf[offBuf];
2117 }
2118# else
2119 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2120 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2121 {
2122 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2123 return pVCpu->iem.s.abOpcode[offOpcode];
2124 }
2125# endif
2126 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2127}
2128
2129#endif /* IEM_WITH_SETJMP */
2130
2131/**
2132 * Fetches the next opcode byte, returns automatically on failure.
2133 *
2134 * @param a_pu8 Where to return the opcode byte.
2135 * @remark Implicitly references pVCpu.
2136 */
2137#ifndef IEM_WITH_SETJMP
2138# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2139 do \
2140 { \
2141 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2142 if (rcStrict2 == VINF_SUCCESS) \
2143 { /* likely */ } \
2144 else \
2145 return rcStrict2; \
2146 } while (0)
2147#else
2148# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2149#endif /* IEM_WITH_SETJMP */
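/*
 * Usage sketch for IEM_OPCODE_GET_NEXT_U8 (and the related fetch macros
 * below): they implicitly reference pVCpu and, in the !IEM_WITH_SETJMP
 * build, expand to code that returns the strict status code from the
 * enclosing function, so the caller must be a VBOXSTRICTRC function.  The
 * decoder helper below is a hypothetical example, not one of the real
 * instruction decoders.
 */
#if 0 /* illustrative sketch, not built */
static VBOXSTRICTRC iemOpExampleFetchImm8(PVMCPUCC pVCpu)
{
    uint8_t bImm;
    IEM_OPCODE_GET_NEXT_U8(&bImm);  /* returns (or longjmps) on fetch failure */
    /* ... decode/execute using bImm ... */
    RT_NOREF(bImm);
    return VINF_SUCCESS;
}
#endif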
2150
2151
2152#ifndef IEM_WITH_SETJMP
2153/**
2154 * Fetches the next signed byte from the opcode stream.
2155 *
2156 * @returns Strict VBox status code.
2157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2158 * @param pi8 Where to return the signed byte.
2159 */
2160DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8)
2161{
2162 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2163}
2164#endif /* !IEM_WITH_SETJMP */
2165
2166
2167/**
2168 * Fetches the next signed byte from the opcode stream, returning automatically
2169 * on failure.
2170 *
2171 * @param a_pi8 Where to return the signed byte.
2172 * @remark Implicitly references pVCpu.
2173 */
2174#ifndef IEM_WITH_SETJMP
2175# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2176 do \
2177 { \
2178 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2179 if (rcStrict2 != VINF_SUCCESS) \
2180 return rcStrict2; \
2181 } while (0)
2182#else /* IEM_WITH_SETJMP */
2183# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2184
2185#endif /* IEM_WITH_SETJMP */
2186
2187#ifndef IEM_WITH_SETJMP
2188
2189/**
2190 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2191 *
2192 * @returns Strict VBox status code.
2193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2194 * @param pu16 Where to return the sign-extended opcode word.
2195 */
2196DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2197{
2198 uint8_t u8;
2199 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2200 if (rcStrict == VINF_SUCCESS)
2201 *pu16 = (int8_t)u8;
2202 return rcStrict;
2203}
2204
2205
2206/**
2207 * Fetches the next signed byte from the opcode stream, extending it to
2208 * unsigned 16-bit.
2209 *
2210 * @returns Strict VBox status code.
2211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2212 * @param pu16 Where to return the unsigned word.
2213 */
2214DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16)
2215{
2216 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2217 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2218 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2219
2220 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2221 pVCpu->iem.s.offOpcode = offOpcode + 1;
2222 return VINF_SUCCESS;
2223}
2224
2225#endif /* !IEM_WITH_SETJMP */
2226
2227/**
2228 * Fetches the next signed byte from the opcode stream, sign-extending it to
2229 * a word and returning automatically on failure.
2230 *
2231 * @param a_pu16 Where to return the word.
2232 * @remark Implicitly references pVCpu.
2233 */
2234#ifndef IEM_WITH_SETJMP
2235# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2236 do \
2237 { \
2238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2239 if (rcStrict2 != VINF_SUCCESS) \
2240 return rcStrict2; \
2241 } while (0)
2242#else
2243# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2244#endif
2245
2246#ifndef IEM_WITH_SETJMP
2247
2248/**
2249 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2250 *
2251 * @returns Strict VBox status code.
2252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2253 * @param pu32 Where to return the opcode dword.
2254 */
2255DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2256{
2257 uint8_t u8;
2258 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2259 if (rcStrict == VINF_SUCCESS)
2260 *pu32 = (int8_t)u8;
2261 return rcStrict;
2262}
2263
2264
2265/**
2266 * Fetches the next signed byte from the opcode stream, extending it to
2267 * unsigned 32-bit.
2268 *
2269 * @returns Strict VBox status code.
2270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2271 * @param pu32 Where to return the unsigned dword.
2272 */
2273DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2274{
2275 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2276 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2277 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2278
2279 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2280 pVCpu->iem.s.offOpcode = offOpcode + 1;
2281 return VINF_SUCCESS;
2282}
2283
2284#endif /* !IEM_WITH_SETJMP */
2285
2286/**
2287 * Fetches the next signed byte from the opcode stream, sign-extending it to
2288 * a double word and returning automatically on failure.
2289 *
2290 * @param a_pu32 Where to return the double word.
2291 * @remark Implicitly references pVCpu.
2292 */
2293#ifndef IEM_WITH_SETJMP
2294# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2295 do \
2296 { \
2297 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2298 if (rcStrict2 != VINF_SUCCESS) \
2299 return rcStrict2; \
2300 } while (0)
2301#else
2302# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2303#endif
2304
2305#ifndef IEM_WITH_SETJMP
2306
2307/**
2308 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2309 *
2310 * @returns Strict VBox status code.
2311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2312 * @param pu64 Where to return the opcode qword.
2313 */
2314DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2315{
2316 uint8_t u8;
2317 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2318 if (rcStrict == VINF_SUCCESS)
2319 *pu64 = (int8_t)u8;
2320 return rcStrict;
2321}
2322
2323
2324/**
2325 * Fetches the next signed byte from the opcode stream, extending it to
2326 * unsigned 64-bit.
2327 *
2328 * @returns Strict VBox status code.
2329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2330 * @param pu64 Where to return the unsigned qword.
2331 */
2332DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2333{
2334 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2335 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2336 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2337
2338 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2339 pVCpu->iem.s.offOpcode = offOpcode + 1;
2340 return VINF_SUCCESS;
2341}
2342
2343#endif /* !IEM_WITH_SETJMP */
2344
2345
2346/**
2347 * Fetches the next signed byte from the opcode stream, sign-extending it to
2348 * a quad word and returning automatically on failure.
2349 *
2350 * @param a_pu64 Where to return the quad word.
2351 * @remark Implicitly references pVCpu.
2352 */
2353#ifndef IEM_WITH_SETJMP
2354# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2355 do \
2356 { \
2357 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2358 if (rcStrict2 != VINF_SUCCESS) \
2359 return rcStrict2; \
2360 } while (0)
2361#else
2362# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2363#endif
2364
2365
2366#ifndef IEM_WITH_SETJMP
2367/**
2368 * Fetches the next opcode byte, noting down its position as the ModR/M byte.
2369 *
2370 * @returns Strict VBox status code.
2371 * @param pVCpu The cross context virtual CPU structure of the
2372 * calling thread.
2373 * @param pu8 Where to return the opcode byte.
2374 */
2375DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8)
2376{
2377 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2378 pVCpu->iem.s.offModRm = offOpcode;
2379 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2380 {
2381 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2382 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2383 return VINF_SUCCESS;
2384 }
2385 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2386}
2387#else /* IEM_WITH_SETJMP */
2388/**
2389 * Fetches the next opcode byte, noting down its position as the ModR/M byte, longjmp on error.
2390 *
2391 * @returns The opcode byte.
2392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2393 */
2394DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu)
2395{
2396# ifdef IEM_WITH_CODE_TLB
2397 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2398 pVCpu->iem.s.offModRm = offBuf;
2399 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2400 if (RT_LIKELY( pbBuf != NULL
2401 && offBuf < pVCpu->iem.s.cbInstrBuf))
2402 {
2403 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2404 return pbBuf[offBuf];
2405 }
2406# else
2407 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2408 pVCpu->iem.s.offModRm = offOpcode;
2409 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2410 {
2411 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2412 return pVCpu->iem.s.abOpcode[offOpcode];
2413 }
2414# endif
2415 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2416}
2417#endif /* IEM_WITH_SETJMP */
2418
2419/**
2420 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2421 * on failure.
2422 *
2423 * Will note down the position of the ModR/M byte for VT-x exits.
2424 *
2425 * @param a_pbRm Where to return the RM opcode byte.
2426 * @remark Implicitly references pVCpu.
2427 */
2428#ifndef IEM_WITH_SETJMP
2429# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2430 do \
2431 { \
2432 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2433 if (rcStrict2 == VINF_SUCCESS) \
2434 { /* likely */ } \
2435 else \
2436 return rcStrict2; \
2437 } while (0)
2438#else
2439# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2440#endif /* IEM_WITH_SETJMP */
2441
2442
2443#ifndef IEM_WITH_SETJMP
2444
2445/**
2446 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2447 *
2448 * @returns Strict VBox status code.
2449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2450 * @param pu16 Where to return the opcode word.
2451 */
2452DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2453{
2454 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2455 if (rcStrict == VINF_SUCCESS)
2456 {
2457 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2458# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2459 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2460# else
2461 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2462# endif
2463 pVCpu->iem.s.offOpcode = offOpcode + 2;
2464 }
2465 else
2466 *pu16 = 0;
2467 return rcStrict;
2468}
2469
2470
2471/**
2472 * Fetches the next opcode word.
2473 *
2474 * @returns Strict VBox status code.
2475 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2476 * @param pu16 Where to return the opcode word.
2477 */
2478DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16)
2479{
2480 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2481 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2482 {
2483 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2484# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2485 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2486# else
2487 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2488# endif
2489 return VINF_SUCCESS;
2490 }
2491 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2492}
2493
2494#else /* IEM_WITH_SETJMP */
2495
2496/**
2497 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2498 *
2499 * @returns The opcode word.
2500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2501 */
2502DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu)
2503{
2504# ifdef IEM_WITH_CODE_TLB
2505 uint16_t u16;
2506 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2507 return u16;
2508# else
2509 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2510 if (rcStrict == VINF_SUCCESS)
2511 {
2512 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2513 pVCpu->iem.s.offOpcode += 2;
2514# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2515 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2516# else
2517 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2518# endif
2519 }
2520 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2521# endif
2522}
2523
2524
2525/**
2526 * Fetches the next opcode word, longjmp on error.
2527 *
2528 * @returns The opcode word.
2529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2530 */
2531DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu)
2532{
2533# ifdef IEM_WITH_CODE_TLB
2534 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2535 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2536 if (RT_LIKELY( pbBuf != NULL
2537 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2538 {
2539 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2540# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2541 return *(uint16_t const *)&pbBuf[offBuf];
2542# else
2543 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2544# endif
2545 }
2546# else
2547 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2548 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2549 {
2550 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2551# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2552 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2553# else
2554 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2555# endif
2556 }
2557# endif
2558 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2559}
2560
2561#endif /* IEM_WITH_SETJMP */
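/*
 * Both variants above yield the same little-endian value: with
 * IEM_USE_UNALIGNED_DATA_ACCESS the word is loaded directly from the opcode
 * bytes, otherwise it is assembled byte by byte with RT_MAKE_U16.  A
 * standalone equivalent of the byte-assembly form:
 */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>

static uint16_t exampleReadU16LittleEndian(uint8_t const *pb)
{
    return (uint16_t)(pb[0] | ((uint16_t)pb[1] << 8));  /* same result as RT_MAKE_U16(pb[0], pb[1]) */
}
#endif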
2562
2563
2564/**
2565 * Fetches the next opcode word, returns automatically on failure.
2566 *
2567 * @param a_pu16 Where to return the opcode word.
2568 * @remark Implicitly references pVCpu.
2569 */
2570#ifndef IEM_WITH_SETJMP
2571# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2572 do \
2573 { \
2574 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2575 if (rcStrict2 != VINF_SUCCESS) \
2576 return rcStrict2; \
2577 } while (0)
2578#else
2579# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2580#endif
2581
2582#ifndef IEM_WITH_SETJMP
2583
2584/**
2585 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2586 *
2587 * @returns Strict VBox status code.
2588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2589 * @param pu32 Where to return the opcode double word.
2590 */
2591DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2592{
2593 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2594 if (rcStrict == VINF_SUCCESS)
2595 {
2596 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2597 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2598 pVCpu->iem.s.offOpcode = offOpcode + 2;
2599 }
2600 else
2601 *pu32 = 0;
2602 return rcStrict;
2603}
2604
2605
2606/**
2607 * Fetches the next opcode word, zero extending it to a double word.
2608 *
2609 * @returns Strict VBox status code.
2610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2611 * @param pu32 Where to return the opcode double word.
2612 */
2613DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2614{
2615 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2616 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2617 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2618
2619 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2620 pVCpu->iem.s.offOpcode = offOpcode + 2;
2621 return VINF_SUCCESS;
2622}
2623
2624#endif /* !IEM_WITH_SETJMP */
2625
2626
2627/**
2628 * Fetches the next opcode word and zero extends it to a double word, returns
2629 * automatically on failure.
2630 *
2631 * @param a_pu32 Where to return the opcode double word.
2632 * @remark Implicitly references pVCpu.
2633 */
2634#ifndef IEM_WITH_SETJMP
2635# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2636 do \
2637 { \
2638 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2639 if (rcStrict2 != VINF_SUCCESS) \
2640 return rcStrict2; \
2641 } while (0)
2642#else
2643# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2644#endif
2645
2646#ifndef IEM_WITH_SETJMP
2647
2648/**
2649 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2650 *
2651 * @returns Strict VBox status code.
2652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2653 * @param pu64 Where to return the opcode quad word.
2654 */
2655DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2656{
2657 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2658 if (rcStrict == VINF_SUCCESS)
2659 {
2660 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2661 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2662 pVCpu->iem.s.offOpcode = offOpcode + 2;
2663 }
2664 else
2665 *pu64 = 0;
2666 return rcStrict;
2667}
2668
2669
2670/**
2671 * Fetches the next opcode word, zero extending it to a quad word.
2672 *
2673 * @returns Strict VBox status code.
2674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2675 * @param pu64 Where to return the opcode quad word.
2676 */
2677DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2678{
2679 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2680 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2681 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2682
2683 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2684 pVCpu->iem.s.offOpcode = offOpcode + 2;
2685 return VINF_SUCCESS;
2686}
2687
2688#endif /* !IEM_WITH_SETJMP */
2689
2690/**
2691 * Fetches the next opcode word and zero extends it to a quad word, returns
2692 * automatically on failure.
2693 *
2694 * @param a_pu64 Where to return the opcode quad word.
2695 * @remark Implicitly references pVCpu.
2696 */
2697#ifndef IEM_WITH_SETJMP
2698# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2699 do \
2700 { \
2701 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2702 if (rcStrict2 != VINF_SUCCESS) \
2703 return rcStrict2; \
2704 } while (0)
2705#else
2706# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2707#endif
2708
2709
2710#ifndef IEM_WITH_SETJMP
2711/**
2712 * Fetches the next signed word from the opcode stream.
2713 *
2714 * @returns Strict VBox status code.
2715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2716 * @param pi16 Where to return the signed word.
2717 */
2718DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16)
2719{
2720 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2721}
2722#endif /* !IEM_WITH_SETJMP */
2723
2724
2725/**
2726 * Fetches the next signed word from the opcode stream, returning automatically
2727 * on failure.
2728 *
2729 * @param a_pi16 Where to return the signed word.
2730 * @remark Implicitly references pVCpu.
2731 */
2732#ifndef IEM_WITH_SETJMP
2733# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2734 do \
2735 { \
2736 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2737 if (rcStrict2 != VINF_SUCCESS) \
2738 return rcStrict2; \
2739 } while (0)
2740#else
2741# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2742#endif
2743
2744#ifndef IEM_WITH_SETJMP
2745
2746/**
2747 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2748 *
2749 * @returns Strict VBox status code.
2750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2751 * @param pu32 Where to return the opcode dword.
2752 */
2753DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2754{
2755 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2756 if (rcStrict == VINF_SUCCESS)
2757 {
2758 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2759# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2760 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2761# else
2762 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2763 pVCpu->iem.s.abOpcode[offOpcode + 1],
2764 pVCpu->iem.s.abOpcode[offOpcode + 2],
2765 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2766# endif
2767 pVCpu->iem.s.offOpcode = offOpcode + 4;
2768 }
2769 else
2770 *pu32 = 0;
2771 return rcStrict;
2772}
2773
2774
2775/**
2776 * Fetches the next opcode dword.
2777 *
2778 * @returns Strict VBox status code.
2779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2780 * @param pu32 Where to return the opcode double word.
2781 */
2782DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32)
2783{
2784 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2785 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2786 {
2787 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2788# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2789 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2790# else
2791 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2792 pVCpu->iem.s.abOpcode[offOpcode + 1],
2793 pVCpu->iem.s.abOpcode[offOpcode + 2],
2794 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2795# endif
2796 return VINF_SUCCESS;
2797 }
2798 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2799}
2800
2801#else /* IEM_WITH_SETJMP */
2802
2803/**
2804 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2805 *
2806 * @returns The opcode dword.
2807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2808 */
2809DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu)
2810{
2811# ifdef IEM_WITH_CODE_TLB
2812 uint32_t u32;
2813 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2814 return u32;
2815# else
2816 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2817 if (rcStrict == VINF_SUCCESS)
2818 {
2819 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2820 pVCpu->iem.s.offOpcode = offOpcode + 4;
2821# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2822 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2823# else
2824 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2825 pVCpu->iem.s.abOpcode[offOpcode + 1],
2826 pVCpu->iem.s.abOpcode[offOpcode + 2],
2827 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2828# endif
2829 }
2830 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2831# endif
2832}
2833
2834
2835/**
2836 * Fetches the next opcode dword, longjmp on error.
2837 *
2838 * @returns The opcode dword.
2839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2840 */
2841DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu)
2842{
2843# ifdef IEM_WITH_CODE_TLB
2844 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2845 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2846 if (RT_LIKELY( pbBuf != NULL
2847 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2848 {
2849 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2850# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2851 return *(uint32_t const *)&pbBuf[offBuf];
2852# else
2853 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2854 pbBuf[offBuf + 1],
2855 pbBuf[offBuf + 2],
2856 pbBuf[offBuf + 3]);
2857# endif
2858 }
2859# else
2860 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2861 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2862 {
2863 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2864# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2865 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2866# else
2867 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2868 pVCpu->iem.s.abOpcode[offOpcode + 1],
2869 pVCpu->iem.s.abOpcode[offOpcode + 2],
2870 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2871# endif
2872 }
2873# endif
2874 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2875}
2876
2877#endif /* IEM_WITH_SETJMP */
2878
2879
2880/**
2881 * Fetches the next opcode dword, returns automatically on failure.
2882 *
2883 * @param a_pu32 Where to return the opcode dword.
2884 * @remark Implicitly references pVCpu.
2885 */
2886#ifndef IEM_WITH_SETJMP
2887# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2888 do \
2889 { \
2890 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2891 if (rcStrict2 != VINF_SUCCESS) \
2892 return rcStrict2; \
2893 } while (0)
2894#else
2895# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2896#endif
2897
2898#ifndef IEM_WITH_SETJMP
2899
2900/**
2901 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2902 *
2903 * @returns Strict VBox status code.
2904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2905 * @param pu64 Where to return the zero-extended opcode dword.
2906 */
2907DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2908{
2909 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2910 if (rcStrict == VINF_SUCCESS)
2911 {
2912 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2913 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2914 pVCpu->iem.s.abOpcode[offOpcode + 1],
2915 pVCpu->iem.s.abOpcode[offOpcode + 2],
2916 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2917 pVCpu->iem.s.offOpcode = offOpcode + 4;
2918 }
2919 else
2920 *pu64 = 0;
2921 return rcStrict;
2922}
2923
2924
2925/**
2926 * Fetches the next opcode dword, zero extending it to a quad word.
2927 *
2928 * @returns Strict VBox status code.
2929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2930 * @param pu64 Where to return the opcode quad word.
2931 */
2932DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2933{
2934 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2935 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2936 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2937
2938 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2939 pVCpu->iem.s.abOpcode[offOpcode + 1],
2940 pVCpu->iem.s.abOpcode[offOpcode + 2],
2941 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2942 pVCpu->iem.s.offOpcode = offOpcode + 4;
2943 return VINF_SUCCESS;
2944}
2945
2946#endif /* !IEM_WITH_SETJMP */
2947
2948
2949/**
2950 * Fetches the next opcode dword and zero extends it to a quad word, returns
2951 * automatically on failure.
2952 *
2953 * @param a_pu64 Where to return the opcode quad word.
2954 * @remark Implicitly references pVCpu.
2955 */
2956#ifndef IEM_WITH_SETJMP
2957# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2958 do \
2959 { \
2960 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2961 if (rcStrict2 != VINF_SUCCESS) \
2962 return rcStrict2; \
2963 } while (0)
2964#else
2965# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2966#endif
2967
2968
2969#ifndef IEM_WITH_SETJMP
2970/**
2971 * Fetches the next signed double word from the opcode stream.
2972 *
2973 * @returns Strict VBox status code.
2974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2975 * @param pi32 Where to return the signed double word.
2976 */
2977DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32)
2978{
2979 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2980}
2981#endif
2982
2983/**
2984 * Fetches the next signed double word from the opcode stream, returning
2985 * automatically on failure.
2986 *
2987 * @param a_pi32 Where to return the signed double word.
2988 * @remark Implicitly references pVCpu.
2989 */
2990#ifndef IEM_WITH_SETJMP
2991# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2992 do \
2993 { \
2994 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2995 if (rcStrict2 != VINF_SUCCESS) \
2996 return rcStrict2; \
2997 } while (0)
2998#else
2999# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3000#endif
3001
3002#ifndef IEM_WITH_SETJMP
3003
3004/**
3005 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3006 *
3007 * @returns Strict VBox status code.
3008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3009 * @param pu64 Where to return the opcode qword.
3010 */
3011DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3012{
3013 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3014 if (rcStrict == VINF_SUCCESS)
3015 {
3016 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3017 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3018 pVCpu->iem.s.abOpcode[offOpcode + 1],
3019 pVCpu->iem.s.abOpcode[offOpcode + 2],
3020 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3021 pVCpu->iem.s.offOpcode = offOpcode + 4;
3022 }
3023 else
3024 *pu64 = 0;
3025 return rcStrict;
3026}
3027
3028
3029/**
3030 * Fetches the next opcode dword, sign extending it into a quad word.
3031 *
3032 * @returns Strict VBox status code.
3033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3034 * @param pu64 Where to return the opcode quad word.
3035 */
3036DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3037{
3038 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3039 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3040 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3041
3042 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3043 pVCpu->iem.s.abOpcode[offOpcode + 1],
3044 pVCpu->iem.s.abOpcode[offOpcode + 2],
3045 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3046 *pu64 = i32;
3047 pVCpu->iem.s.offOpcode = offOpcode + 4;
3048 return VINF_SUCCESS;
3049}
3050
3051#endif /* !IEM_WITH_SETJMP */
3052
3053
3054/**
3055 * Fetches the next opcode double word and sign extends it to a quad word,
3056 * returns automatically on failure.
3057 *
3058 * @param a_pu64 Where to return the opcode quad word.
3059 * @remark Implicitly references pVCpu.
3060 */
3061#ifndef IEM_WITH_SETJMP
3062# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3063 do \
3064 { \
3065 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3066 if (rcStrict2 != VINF_SUCCESS) \
3067 return rcStrict2; \
3068 } while (0)
3069#else
3070# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3071#endif
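/*
 * The S32->U64 fetchers above rely on plain C sign extension: casting the
 * assembled 32-bit immediate to int32_t and then widening it replicates the
 * sign bit into the upper half, which is what instructions taking a
 * sign-extended imm32 operand need in 64-bit mode.  Standalone equivalent:
 */
#if 0 /* illustrative sketch, not built */
# include <stdint.h>

static uint64_t exampleSignExtendImm32(uint32_t u32)
{
    return (uint64_t)(int64_t)(int32_t)u32;     /* e.g. 0xfffffff0 -> 0xfffffffffffffff0 */
}
#endif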
3072
3073#ifndef IEM_WITH_SETJMP
3074
3075/**
3076 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3077 *
3078 * @returns Strict VBox status code.
3079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3080 * @param pu64 Where to return the opcode qword.
3081 */
3082DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3083{
3084 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3085 if (rcStrict == VINF_SUCCESS)
3086 {
3087 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3088# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3089 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3090# else
3091 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3092 pVCpu->iem.s.abOpcode[offOpcode + 1],
3093 pVCpu->iem.s.abOpcode[offOpcode + 2],
3094 pVCpu->iem.s.abOpcode[offOpcode + 3],
3095 pVCpu->iem.s.abOpcode[offOpcode + 4],
3096 pVCpu->iem.s.abOpcode[offOpcode + 5],
3097 pVCpu->iem.s.abOpcode[offOpcode + 6],
3098 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3099# endif
3100 pVCpu->iem.s.offOpcode = offOpcode + 8;
3101 }
3102 else
3103 *pu64 = 0;
3104 return rcStrict;
3105}
3106
3107
3108/**
3109 * Fetches the next opcode qword.
3110 *
3111 * @returns Strict VBox status code.
3112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3113 * @param pu64 Where to return the opcode qword.
3114 */
3115DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64)
3116{
3117 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3118 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3119 {
3120# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3121 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3122# else
3123 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3124 pVCpu->iem.s.abOpcode[offOpcode + 1],
3125 pVCpu->iem.s.abOpcode[offOpcode + 2],
3126 pVCpu->iem.s.abOpcode[offOpcode + 3],
3127 pVCpu->iem.s.abOpcode[offOpcode + 4],
3128 pVCpu->iem.s.abOpcode[offOpcode + 5],
3129 pVCpu->iem.s.abOpcode[offOpcode + 6],
3130 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3131# endif
3132 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3133 return VINF_SUCCESS;
3134 }
3135 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3136}
3137
3138#else /* IEM_WITH_SETJMP */
3139
3140/**
3141 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3142 *
3143 * @returns The opcode qword.
3144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3145 */
3146DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu)
3147{
3148# ifdef IEM_WITH_CODE_TLB
3149 uint64_t u64;
3150 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3151 return u64;
3152# else
3153 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3154 if (rcStrict == VINF_SUCCESS)
3155 {
3156 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3157 pVCpu->iem.s.offOpcode = offOpcode + 8;
3158# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3159 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3160# else
3161 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3162 pVCpu->iem.s.abOpcode[offOpcode + 1],
3163 pVCpu->iem.s.abOpcode[offOpcode + 2],
3164 pVCpu->iem.s.abOpcode[offOpcode + 3],
3165 pVCpu->iem.s.abOpcode[offOpcode + 4],
3166 pVCpu->iem.s.abOpcode[offOpcode + 5],
3167 pVCpu->iem.s.abOpcode[offOpcode + 6],
3168 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3169# endif
3170 }
3171 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3172# endif
3173}
3174
3175
3176/**
3177 * Fetches the next opcode qword, longjmp on error.
3178 *
3179 * @returns The opcode qword.
3180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3181 */
3182DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu)
3183{
3184# ifdef IEM_WITH_CODE_TLB
3185 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3186 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3187 if (RT_LIKELY( pbBuf != NULL
3188 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3189 {
3190 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3191# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3192 return *(uint64_t const *)&pbBuf[offBuf];
3193# else
3194 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3195 pbBuf[offBuf + 1],
3196 pbBuf[offBuf + 2],
3197 pbBuf[offBuf + 3],
3198 pbBuf[offBuf + 4],
3199 pbBuf[offBuf + 5],
3200 pbBuf[offBuf + 6],
3201 pbBuf[offBuf + 7]);
3202# endif
3203 }
3204# else
3205 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3206 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3207 {
3208 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3209# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3210 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3211# else
3212 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3213 pVCpu->iem.s.abOpcode[offOpcode + 1],
3214 pVCpu->iem.s.abOpcode[offOpcode + 2],
3215 pVCpu->iem.s.abOpcode[offOpcode + 3],
3216 pVCpu->iem.s.abOpcode[offOpcode + 4],
3217 pVCpu->iem.s.abOpcode[offOpcode + 5],
3218 pVCpu->iem.s.abOpcode[offOpcode + 6],
3219 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3220# endif
3221 }
3222# endif
3223 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3224}
3225
3226#endif /* IEM_WITH_SETJMP */
3227
3228/**
3229 * Fetches the next opcode quad word, returns automatically on failure.
3230 *
3231 * @param a_pu64 Where to return the opcode quad word.
3232 * @remark Implicitly references pVCpu.
3233 */
3234#ifndef IEM_WITH_SETJMP
3235# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3236 do \
3237 { \
3238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3239 if (rcStrict2 != VINF_SUCCESS) \
3240 return rcStrict2; \
3241 } while (0)
3242#else
3243# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3244#endif
3245
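/**
 * Illustrative usage sketch (not part of the original source): the quad word
 * fetchers above follow the same pattern. IEM_OPCODE_GET_NEXT_U64 consumes
 * eight opcode bytes as-is, while IEM_OPCODE_GET_NEXT_S32_SX_U64 consumes only
 * four and sign extends them, e.g. an encoded 0x80000000 is stored as
 * UINT64_C(0xffffffff80000000).
 *
 * @code
 *      uint64_t u64Imm;                            // hypothetical locals
 *      IEM_OPCODE_GET_NEXT_U64(&u64Imm);           // 8 opcode bytes
 *
 *      uint64_t u64SxImm;
 *      IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64SxImm);  // 4 opcode bytes, sign extended
 * @endcode
 */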
3246
3247/** @name Misc Worker Functions.
3248 * @{
3249 */
3250
3251/**
3252 * Gets the exception class for the specified exception vector.
3253 *
3254 * @returns The class of the specified exception.
3255 * @param uVector The exception vector.
3256 */
3257IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3258{
3259 Assert(uVector <= X86_XCPT_LAST);
3260 switch (uVector)
3261 {
3262 case X86_XCPT_DE:
3263 case X86_XCPT_TS:
3264 case X86_XCPT_NP:
3265 case X86_XCPT_SS:
3266 case X86_XCPT_GP:
3267 case X86_XCPT_SX: /* AMD only */
3268 return IEMXCPTCLASS_CONTRIBUTORY;
3269
3270 case X86_XCPT_PF:
3271 case X86_XCPT_VE: /* Intel only */
3272 return IEMXCPTCLASS_PAGE_FAULT;
3273
3274 case X86_XCPT_DF:
3275 return IEMXCPTCLASS_DOUBLE_FAULT;
3276 }
3277 return IEMXCPTCLASS_BENIGN;
3278}
3279
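/*
 * Quick illustration (not part of the original source) of the classification
 * above: iemGetXcptClass(X86_XCPT_GP) yields IEMXCPTCLASS_CONTRIBUTORY,
 * iemGetXcptClass(X86_XCPT_PF) yields IEMXCPTCLASS_PAGE_FAULT, and any vector
 * not listed in the switch, e.g. X86_XCPT_UD, is IEMXCPTCLASS_BENIGN.
 */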
3280
3281/**
3282 * Evaluates how to handle an exception caused during delivery of another event
3283 * (exception / interrupt).
3284 *
3285 * @returns How to handle the recursive exception.
3286 * @param pVCpu The cross context virtual CPU structure of the
3287 * calling thread.
3288 * @param fPrevFlags The flags of the previous event.
3289 * @param uPrevVector The vector of the previous event.
3290 * @param fCurFlags The flags of the current exception.
3291 * @param uCurVector The vector of the current exception.
3292 * @param pfXcptRaiseInfo Where to store additional information about the
3293 * exception condition. Optional.
3294 */
3295VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3296 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3297{
3298 /*
3299 * Only CPU exceptions can be raised while delivering other events; software interrupt
3300 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3301 */
3302 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3303 Assert(pVCpu); RT_NOREF(pVCpu);
3304 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3305
3306 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3307 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3308 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3309 {
3310 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3311 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3312 {
3313 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3314 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3315 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3316 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3317 {
3318 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3319 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3320 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3321 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3322 uCurVector, pVCpu->cpum.GstCtx.cr2));
3323 }
3324 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3325 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3326 {
3327 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3328 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3329 }
3330 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3331 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3332 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3333 {
3334 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3335 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3336 }
3337 }
3338 else
3339 {
3340 if (uPrevVector == X86_XCPT_NMI)
3341 {
3342 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3343 if (uCurVector == X86_XCPT_PF)
3344 {
3345 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3346 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3347 }
3348 }
3349 else if ( uPrevVector == X86_XCPT_AC
3350 && uCurVector == X86_XCPT_AC)
3351 {
3352 enmRaise = IEMXCPTRAISE_CPU_HANG;
3353 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3354 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3355 }
3356 }
3357 }
3358 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3359 {
3360 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3361 if (uCurVector == X86_XCPT_PF)
3362 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3363 }
3364 else
3365 {
3366 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3367 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3368 }
3369
3370 if (pfXcptRaiseInfo)
3371 *pfXcptRaiseInfo = fRaiseInfo;
3372 return enmRaise;
3373}
3374
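/**
 * Illustrative sketch (not part of the original source): a caller that hits a
 * second exception while delivering a first one can use the helper above to
 * decide how to proceed. For instance, a \#GP raised while delivering a \#NP
 * (both contributory) escalates to a double fault:
 *
 * @code
 *      IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
 *      IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_NP,
 *                                                       IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                       &fRaiseInfo);
 *      Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 * @endcode
 */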
3375
3376/**
3377 * Enters the CPU shutdown state initiated by a triple fault or other
3378 * unrecoverable conditions.
3379 *
3380 * @returns Strict VBox status code.
3381 * @param pVCpu The cross context virtual CPU structure of the
3382 * calling thread.
3383 */
3384IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu)
3385{
3386 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3387 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3388
3389 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3390 {
3391 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3392 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3393 }
3394
3395 RT_NOREF(pVCpu);
3396 return VINF_EM_TRIPLE_FAULT;
3397}
3398
3399
3400/**
3401 * Validates a new SS segment.
3402 *
3403 * @returns VBox strict status code.
3404 * @param pVCpu The cross context virtual CPU structure of the
3405 * calling thread.
3406 * @param NewSS The new SS selector.
3407 * @param uCpl The CPL to load the stack for.
3408 * @param pDesc Where to return the descriptor.
3409 */
3410IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3411{
3412 /* Null selectors are not allowed (we're not called for dispatching
3413 interrupts with SS=0 in long mode). */
3414 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3415 {
3416 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3417 return iemRaiseTaskSwitchFault0(pVCpu);
3418 }
3419
3420 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3421 if ((NewSS & X86_SEL_RPL) != uCpl)
3422 {
3423 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3424 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3425 }
3426
3427 /*
3428 * Read the descriptor.
3429 */
3430 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3431 if (rcStrict != VINF_SUCCESS)
3432 return rcStrict;
3433
3434 /*
3435 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3436 */
3437 if (!pDesc->Legacy.Gen.u1DescType)
3438 {
3439 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3440 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3441 }
3442
3443 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3444 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3445 {
3446 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3447 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3448 }
3449 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3450 {
3451 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3452 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3453 }
3454
3455 /* Is it there? */
3456 /** @todo testcase: Is this checked before the canonical / limit check below? */
3457 if (!pDesc->Legacy.Gen.u1Present)
3458 {
3459 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3460 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3461 }
3462
3463 return VINF_SUCCESS;
3464}
3465
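/**
 * Illustrative usage sketch (not part of the original source): a caller about
 * to load SS, e.g. during a privilege level change, validates the incoming
 * selector first (uNewSS here is just a placeholder) and lets the helper raise
 * \#TS or \#NP on its behalf:
 *
 * @code
 *      IEMSELDESC DescSS;
 *      VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, uNewSS, pVCpu->iem.s.uCpl, &DescSS);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;    // exception already raised for the guest
 * @endcode
 */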
3466
3467/**
3468 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3469 * not (kind of obsolete now).
3470 *
3471 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3472 */
3473#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3474
3475/**
3476 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
3477 *
3478 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3479 * @param a_fEfl The new EFLAGS.
3480 */
3481#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3482
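/**
 * Illustrative usage sketch (not part of the original source): the two macros
 * above form a read-modify-write pair on the guest EFLAGS; the real-mode
 * exception path further down masks IF/TF/AC exactly this way:
 *
 * @code
 *      uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 *      fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
 *      IEMMISC_SET_EFL(pVCpu, fEfl);
 * @endcode
 */
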
3483/** @} */
3484
3485
3486/** @name Raising Exceptions.
3487 *
3488 * @{
3489 */
3490
3491
3492/**
3493 * Loads the specified stack far pointer from the TSS.
3494 *
3495 * @returns VBox strict status code.
3496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3497 * @param uCpl The CPL to load the stack for.
3498 * @param pSelSS Where to return the new stack segment.
3499 * @param puEsp Where to return the new stack pointer.
3500 */
3501IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3502{
3503 VBOXSTRICTRC rcStrict;
3504 Assert(uCpl < 4);
3505
3506 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3507 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3508 {
3509 /*
3510 * 16-bit TSS (X86TSS16).
3511 */
3512 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3513 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3514 {
3515 uint32_t off = uCpl * 4 + 2;
3516 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3517 {
3518 /** @todo check actual access pattern here. */
3519 uint32_t u32Tmp = 0; /* gcc maybe... */
3520 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3521 if (rcStrict == VINF_SUCCESS)
3522 {
3523 *puEsp = RT_LOWORD(u32Tmp);
3524 *pSelSS = RT_HIWORD(u32Tmp);
3525 return VINF_SUCCESS;
3526 }
3527 }
3528 else
3529 {
3530 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3531 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3532 }
3533 break;
3534 }
3535
3536 /*
3537 * 32-bit TSS (X86TSS32).
3538 */
3539 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3540 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3541 {
3542 uint32_t off = uCpl * 8 + 4;
3543 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3544 {
3545/** @todo check actual access pattern here. */
3546 uint64_t u64Tmp;
3547 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3548 if (rcStrict == VINF_SUCCESS)
3549 {
3550 *puEsp = u64Tmp & UINT32_MAX;
3551 *pSelSS = (RTSEL)(u64Tmp >> 32);
3552 return VINF_SUCCESS;
3553 }
3554 }
3555 else
3556 {
3557 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3558 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3559 }
3560 break;
3561 }
3562
3563 default:
3564 AssertFailed();
3565 rcStrict = VERR_IEM_IPE_4;
3566 break;
3567 }
3568
3569 *puEsp = 0; /* make gcc happy */
3570 *pSelSS = 0; /* make gcc happy */
3571 return rcStrict;
3572}
3573
3574
3575/**
3576 * Loads the specified stack pointer from the 64-bit TSS.
3577 *
3578 * @returns VBox strict status code.
3579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3580 * @param uCpl The CPL to load the stack for.
3581 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3582 * @param puRsp Where to return the new stack pointer.
3583 */
3584IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3585{
3586 Assert(uCpl < 4);
3587 Assert(uIst < 8);
3588 *puRsp = 0; /* make gcc happy */
3589
3590 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3591 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3592
3593 uint32_t off;
3594 if (uIst)
3595 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3596 else
3597 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3598 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3599 {
3600 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3601 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3602 }
3603
3604 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3605}
3606
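/**
 * Illustrative usage sketch (not part of the original source): when
 * dispatching through a 64-bit interrupt gate, a non-zero IST index selects
 * one of the seven IST slots while zero falls back to the RSPn field that
 * matches the target CPL:
 *
 * @code
 *      uint64_t uNewRsp = 0;
 *      VBOXSTRICTRC rcStrict = iemRaiseLoadStackFromTss64(pVCpu, 0, 2, &uNewRsp);
 *      // uCpl=0, uIst=2: reads X86TSS64::ist2; with uIst=0 it would read X86TSS64::rsp0.
 * @endcode
 */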
3607
3608/**
3609 * Adjust the CPU state according to the exception being raised.
3610 *
3611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3612 * @param u8Vector The exception that has been raised.
3613 */
3614DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
3615{
3616 switch (u8Vector)
3617 {
3618 case X86_XCPT_DB:
3619 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3620 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3621 break;
3622 /** @todo Read the AMD and Intel exception reference... */
3623 }
3624}
3625
3626
3627/**
3628 * Implements exceptions and interrupts for real mode.
3629 *
3630 * @returns VBox strict status code.
3631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3632 * @param cbInstr The number of bytes to offset rIP by in the return
3633 * address.
3634 * @param u8Vector The interrupt / exception vector number.
3635 * @param fFlags The flags.
3636 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3637 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3638 */
3639IEM_STATIC VBOXSTRICTRC
3640iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
3641 uint8_t cbInstr,
3642 uint8_t u8Vector,
3643 uint32_t fFlags,
3644 uint16_t uErr,
3645 uint64_t uCr2)
3646{
3647 NOREF(uErr); NOREF(uCr2);
3648 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3649
3650 /*
3651 * Read the IDT entry.
3652 */
3653 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3654 {
3655 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3656 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3657 }
3658 RTFAR16 Idte;
3659 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3660 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3661 {
3662 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3663 return rcStrict;
3664 }
3665
3666 /*
3667 * Push the stack frame.
3668 */
3669 uint16_t *pu16Frame;
3670 uint64_t uNewRsp;
3671 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3672 if (rcStrict != VINF_SUCCESS)
3673 return rcStrict;
3674
3675 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3676#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3677 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3678 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3679 fEfl |= UINT16_C(0xf000);
3680#endif
3681 pu16Frame[2] = (uint16_t)fEfl;
3682 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3683 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3684 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3685 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3686 return rcStrict;
3687
3688 /*
3689 * Load the vector address into cs:ip and make exception specific state
3690 * adjustments.
3691 */
3692 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3693 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3694 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3695 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3696 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3697 pVCpu->cpum.GstCtx.rip = Idte.off;
3698 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3699 IEMMISC_SET_EFL(pVCpu, fEfl);
3700
3701 /** @todo do we actually do this in real mode? */
3702 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3703 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3704
3705 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3706}
3707
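/*
 * Worked example (not part of the original source) for the real-mode path
 * above: for vector 0x21 with IDTR.base=0 the 4-byte IVT entry is fetched from
 * linear address 0x84 (4 * 0x21), a 6-byte frame of FLAGS, CS and IP (IP +
 * cbInstr for software interrupts) is pushed, and execution resumes at
 * Idte.sel:Idte.off with CS.u64Base set to Idte.sel << 4.
 */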
3708
3709/**
3710 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3711 *
3712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3713 * @param pSReg Pointer to the segment register.
3714 */
3715IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
3716{
3717 pSReg->Sel = 0;
3718 pSReg->ValidSel = 0;
3719 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3720 {
3721 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
3722 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3723 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3724 }
3725 else
3726 {
3727 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3728 /** @todo check this on AMD-V */
3729 pSReg->u64Base = 0;
3730 pSReg->u32Limit = 0;
3731 }
3732}
3733
3734
3735/**
3736 * Loads a segment selector during a task switch in V8086 mode.
3737 *
3738 * @param pSReg Pointer to the segment register.
3739 * @param uSel The selector value to load.
3740 */
3741IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3742{
3743 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3744 pSReg->Sel = uSel;
3745 pSReg->ValidSel = uSel;
3746 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3747 pSReg->u64Base = uSel << 4;
3748 pSReg->u32Limit = 0xffff;
3749 pSReg->Attr.u = 0xf3;
3750}
3751
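/*
 * Worked example (not part of the original source): loading uSel=0x1234 via
 * the helper above yields a real-mode style segment with u64Base=0x12340
 * (selector << 4), u32Limit=0xffff and the fixed attribute value 0xf3.
 */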
3752
3753/**
3754 * Loads a NULL data selector into a selector register, both the hidden and
3755 * visible parts, in protected mode.
3756 *
3757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3758 * @param pSReg Pointer to the segment register.
3759 * @param uRpl The RPL.
3760 */
3761IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3762{
3763 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3764 * data selector in protected mode. */
3765 pSReg->Sel = uRpl;
3766 pSReg->ValidSel = uRpl;
3767 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3768 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3769 {
3770 /* VT-x (Intel 3960x) observed doing something like this. */
3771 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3772 pSReg->u32Limit = UINT32_MAX;
3773 pSReg->u64Base = 0;
3774 }
3775 else
3776 {
3777 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3778 pSReg->u32Limit = 0;
3779 pSReg->u64Base = 0;
3780 }
3781}
3782
3783
3784/**
3785 * Loads a segment selector during a task switch in protected mode.
3786 *
3787 * In this task switch scenario, we would throw \#TS exceptions rather than
3788 * \#GPs.
3789 *
3790 * @returns VBox strict status code.
3791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3792 * @param pSReg Pointer to the segment register.
3793 * @param uSel The new selector value.
3794 *
3795 * @remarks This does _not_ handle CS or SS.
3796 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3797 */
3798IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3799{
3800 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3801
3802 /* Null data selector. */
3803 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3804 {
3805 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3807 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3808 return VINF_SUCCESS;
3809 }
3810
3811 /* Fetch the descriptor. */
3812 IEMSELDESC Desc;
3813 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3814 if (rcStrict != VINF_SUCCESS)
3815 {
3816 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3817 VBOXSTRICTRC_VAL(rcStrict)));
3818 return rcStrict;
3819 }
3820
3821 /* Must be a data segment or readable code segment. */
3822 if ( !Desc.Legacy.Gen.u1DescType
3823 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3824 {
3825 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3826 Desc.Legacy.Gen.u4Type));
3827 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3828 }
3829
3830 /* Check privileges for data segments and non-conforming code segments. */
3831 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3832 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3833 {
3834 /* The RPL and the new CPL must be less than or equal to the DPL. */
3835 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3836 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3837 {
3838 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3839 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3840 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3841 }
3842 }
3843
3844 /* Is it there? */
3845 if (!Desc.Legacy.Gen.u1Present)
3846 {
3847 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3848 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3849 }
3850
3851 /* The base and limit. */
3852 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3853 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3854
3855 /*
3856 * Ok, everything checked out fine. Now set the accessed bit before
3857 * committing the result into the registers.
3858 */
3859 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3860 {
3861 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3862 if (rcStrict != VINF_SUCCESS)
3863 return rcStrict;
3864 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3865 }
3866
3867 /* Commit */
3868 pSReg->Sel = uSel;
3869 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3870 pSReg->u32Limit = cbLimit;
3871 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3872 pSReg->ValidSel = uSel;
3873 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3874 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3875 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3876
3877 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3878 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3879 return VINF_SUCCESS;
3880}
3881
3882
3883/**
3884 * Performs a task switch.
3885 *
3886 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3887 * caller is responsible for performing the necessary checks (like DPL, TSS
3888 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3889 * reference for JMP, CALL, IRET.
3890 *
3891 * If the task switch is due to a software interrupt or hardware exception,
3892 * the caller is responsible for validating the TSS selector and descriptor. See
3893 * Intel Instruction reference for INT n.
3894 *
3895 * @returns VBox strict status code.
3896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3897 * @param enmTaskSwitch The cause of the task switch.
3898 * @param uNextEip The EIP effective after the task switch.
3899 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3900 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3901 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3902 * @param SelTSS The TSS selector of the new task.
3903 * @param pNewDescTSS Pointer to the new TSS descriptor.
3904 */
3905IEM_STATIC VBOXSTRICTRC
3906iemTaskSwitch(PVMCPUCC pVCpu,
3907 IEMTASKSWITCH enmTaskSwitch,
3908 uint32_t uNextEip,
3909 uint32_t fFlags,
3910 uint16_t uErr,
3911 uint64_t uCr2,
3912 RTSEL SelTSS,
3913 PIEMSELDESC pNewDescTSS)
3914{
3915 Assert(!IEM_IS_REAL_MODE(pVCpu));
3916 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3917 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3918
3919 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3920 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3921 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3922 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3923 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3924
3925 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3926 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3927
3928 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3929 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3930
3931 /* Update CR2 in case it's a page-fault. */
3932 /** @todo This should probably be done much earlier in IEM/PGM. See
3933 * @bugref{5653#c49}. */
3934 if (fFlags & IEM_XCPT_FLAGS_CR2)
3935 pVCpu->cpum.GstCtx.cr2 = uCr2;
3936
3937 /*
3938 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3939 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3940 */
3941 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3942 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3943 if (uNewTSSLimit < uNewTSSLimitMin)
3944 {
3945 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3946 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3947 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3948 }
3949
3950 /*
3951 * Task switches in VMX non-root mode always cause task-switch VM-exits.
3952 * The new TSS must have been read and validated (DPL, limits etc.) before a
3953 * task-switch VM-exit commences.
3954 *
3955 * See Intel spec. 25.4.2 "Treatment of Task Switches".
3956 */
3957 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3958 {
3959 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
3960 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
3961 }
3962
3963 /*
3964 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3965 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3966 */
3967 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3968 {
3969 uint32_t const uExitInfo1 = SelTSS;
3970 uint32_t uExitInfo2 = uErr;
3971 switch (enmTaskSwitch)
3972 {
3973 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3974 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3975 default: break;
3976 }
3977 if (fFlags & IEM_XCPT_FLAGS_ERR)
3978 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3979 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
3980 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3981
3982 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3983 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3984 RT_NOREF2(uExitInfo1, uExitInfo2);
3985 }
3986
3987 /*
3988 * Check the current TSS limit. The last write to the current TSS during the
3989 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3990 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3991 *
3992 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3993 * end up with smaller than "legal" TSS limits.
3994 */
3995 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
3996 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3997 if (uCurTSSLimit < uCurTSSLimitMin)
3998 {
3999 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4000 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4001 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4002 }
4003
4004 /*
4005 * Verify that the new TSS can be accessed and map it. Map only the required contents
4006 * and not the entire TSS.
4007 */
4008 void *pvNewTSS;
4009 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
4010 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4011 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4012 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4013 * not perform correct translation if this happens. See Intel spec. 7.2.1
4014 * "Task-State Segment". */
4015 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4016 if (rcStrict != VINF_SUCCESS)
4017 {
4018 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4019 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4020 return rcStrict;
4021 }
4022
4023 /*
4024 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4025 */
4026 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4027 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4028 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4029 {
4030 PX86DESC pDescCurTSS;
4031 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4032 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4033 if (rcStrict != VINF_SUCCESS)
4034 {
4035 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4036 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4037 return rcStrict;
4038 }
4039
4040 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4041 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4042 if (rcStrict != VINF_SUCCESS)
4043 {
4044 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4045 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4046 return rcStrict;
4047 }
4048
4049 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4050 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4051 {
4052 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4053 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4054 u32EFlags &= ~X86_EFL_NT;
4055 }
4056 }
4057
4058 /*
4059 * Save the CPU state into the current TSS.
4060 */
4061 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4062 if (GCPtrNewTSS == GCPtrCurTSS)
4063 {
4064 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4065 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4066 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4067 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4068 pVCpu->cpum.GstCtx.ldtr.Sel));
4069 }
4070 if (fIsNewTSS386)
4071 {
4072 /*
4073 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4074 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4075 */
4076 void *pvCurTSS32;
4077 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4078 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4079 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4080 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4081 if (rcStrict != VINF_SUCCESS)
4082 {
4083 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4084 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4085 return rcStrict;
4086 }
4087
4088 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTSS..cbCurTSS). */
4089 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4090 pCurTSS32->eip = uNextEip;
4091 pCurTSS32->eflags = u32EFlags;
4092 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4093 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4094 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4095 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4096 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4097 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4098 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4099 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4100 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4101 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4102 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4103 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4104 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4105 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4106
4107 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4108 if (rcStrict != VINF_SUCCESS)
4109 {
4110 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4111 VBOXSTRICTRC_VAL(rcStrict)));
4112 return rcStrict;
4113 }
4114 }
4115 else
4116 {
4117 /*
4118 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4119 */
4120 void *pvCurTSS16;
4121 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4122 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4123 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4124 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4125 if (rcStrict != VINF_SUCCESS)
4126 {
4127 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4128 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4129 return rcStrict;
4130 }
4131
4132 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. interval [offCurTSS..cbCurTSS). */
4133 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4134 pCurTSS16->ip = uNextEip;
4135 pCurTSS16->flags = u32EFlags;
4136 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4137 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4138 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4139 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4140 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4141 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4142 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4143 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4144 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4145 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4146 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4147 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4148
4149 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4150 if (rcStrict != VINF_SUCCESS)
4151 {
4152 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4153 VBOXSTRICTRC_VAL(rcStrict)));
4154 return rcStrict;
4155 }
4156 }
4157
4158 /*
4159 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4160 */
4161 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4162 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4163 {
4164 /* 16 or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4165 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4166 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4167 }
4168
4169 /*
4170 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4171 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4172 */
4173 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4174 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4175 bool fNewDebugTrap;
4176 if (fIsNewTSS386)
4177 {
4178 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
4179 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4180 uNewEip = pNewTSS32->eip;
4181 uNewEflags = pNewTSS32->eflags;
4182 uNewEax = pNewTSS32->eax;
4183 uNewEcx = pNewTSS32->ecx;
4184 uNewEdx = pNewTSS32->edx;
4185 uNewEbx = pNewTSS32->ebx;
4186 uNewEsp = pNewTSS32->esp;
4187 uNewEbp = pNewTSS32->ebp;
4188 uNewEsi = pNewTSS32->esi;
4189 uNewEdi = pNewTSS32->edi;
4190 uNewES = pNewTSS32->es;
4191 uNewCS = pNewTSS32->cs;
4192 uNewSS = pNewTSS32->ss;
4193 uNewDS = pNewTSS32->ds;
4194 uNewFS = pNewTSS32->fs;
4195 uNewGS = pNewTSS32->gs;
4196 uNewLdt = pNewTSS32->selLdt;
4197 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4198 }
4199 else
4200 {
4201 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
4202 uNewCr3 = 0;
4203 uNewEip = pNewTSS16->ip;
4204 uNewEflags = pNewTSS16->flags;
4205 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4206 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4207 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4208 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4209 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4210 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4211 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4212 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4213 uNewES = pNewTSS16->es;
4214 uNewCS = pNewTSS16->cs;
4215 uNewSS = pNewTSS16->ss;
4216 uNewDS = pNewTSS16->ds;
4217 uNewFS = 0;
4218 uNewGS = 0;
4219 uNewLdt = pNewTSS16->selLdt;
4220 fNewDebugTrap = false;
4221 }
4222
4223 if (GCPtrNewTSS == GCPtrCurTSS)
4224 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4225 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4226
4227 /*
4228 * We're done accessing the new TSS.
4229 */
4230 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4231 if (rcStrict != VINF_SUCCESS)
4232 {
4233 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4234 return rcStrict;
4235 }
4236
4237 /*
4238 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4239 */
4240 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4241 {
4242 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4243 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4244 if (rcStrict != VINF_SUCCESS)
4245 {
4246 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4247 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4248 return rcStrict;
4249 }
4250
4251 /* Check that the descriptor indicates the new TSS is available (not busy). */
4252 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4253 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4254 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4255
4256 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4257 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4258 if (rcStrict != VINF_SUCCESS)
4259 {
4260 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4261 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4262 return rcStrict;
4263 }
4264 }
4265
4266 /*
4267 * From this point on, we're technically in the new task. Exceptions raised from here on
4268 * are deferred until the task switch completes and are taken before executing any instructions in the new task.
4269 */
4270 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4271 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4272 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4273 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4274 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4275 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4276 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4277
4278 /* Set the busy bit in TR. */
4279 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4280
4281 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4282 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4283 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4284 {
4285 uNewEflags |= X86_EFL_NT;
4286 }
4287
4288 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4289 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4290 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4291
4292 pVCpu->cpum.GstCtx.eip = uNewEip;
4293 pVCpu->cpum.GstCtx.eax = uNewEax;
4294 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4295 pVCpu->cpum.GstCtx.edx = uNewEdx;
4296 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4297 pVCpu->cpum.GstCtx.esp = uNewEsp;
4298 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4299 pVCpu->cpum.GstCtx.esi = uNewEsi;
4300 pVCpu->cpum.GstCtx.edi = uNewEdi;
4301
4302 uNewEflags &= X86_EFL_LIVE_MASK;
4303 uNewEflags |= X86_EFL_RA1_MASK;
4304 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4305
4306 /*
4307 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4308 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4309 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4310 */
4311 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4312 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4313
4314 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4315 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4316
4317 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4318 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4319
4320 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4321 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4322
4323 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4324 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4325
4326 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4327 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4328 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4329
4330 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4331 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4332 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4333 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4334
4335 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4336 {
4337 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4338 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4339 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4340 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4341 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4342 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4343 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4344 }
4345
4346 /*
4347 * Switch CR3 for the new task.
4348 */
4349 if ( fIsNewTSS386
4350 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4351 {
4352 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4353 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4354 AssertRCSuccessReturn(rc, rc);
4355
4356 /* Inform PGM. */
4357 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
4358 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), false /* fPdpesMapped */);
4359 AssertRCReturn(rc, rc);
4360 /* ignore informational status codes */
4361
4362 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4363 }
4364
4365 /*
4366 * Switch LDTR for the new task.
4367 */
4368 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4369 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4370 else
4371 {
4372 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4373
4374 IEMSELDESC DescNewLdt;
4375 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4376 if (rcStrict != VINF_SUCCESS)
4377 {
4378 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4379 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4380 return rcStrict;
4381 }
4382 if ( !DescNewLdt.Legacy.Gen.u1Present
4383 || DescNewLdt.Legacy.Gen.u1DescType
4384 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4385 {
4386 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4387 uNewLdt, DescNewLdt.Legacy.u));
4388 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4389 }
4390
4391 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4392 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4393 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4394 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4395 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4396 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4397 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4398 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4399 }
4400
4401 IEMSELDESC DescSS;
4402 if (IEM_IS_V86_MODE(pVCpu))
4403 {
4404 pVCpu->iem.s.uCpl = 3;
4405 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4406 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4407 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4408 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4409 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4410 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4411
4412 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
4413 DescSS.Legacy.u = 0;
4414 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4415 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4416 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4417 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4418 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4419 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4420 DescSS.Legacy.Gen.u2Dpl = 3;
4421 }
4422 else
4423 {
4424 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
4425
4426 /*
4427 * Load the stack segment for the new task.
4428 */
4429 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4430 {
4431 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4432 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4433 }
4434
4435 /* Fetch the descriptor. */
4436 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4437 if (rcStrict != VINF_SUCCESS)
4438 {
4439 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4440 VBOXSTRICTRC_VAL(rcStrict)));
4441 return rcStrict;
4442 }
4443
4444 /* SS must be a data segment and writable. */
4445 if ( !DescSS.Legacy.Gen.u1DescType
4446 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4447 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4448 {
4449 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4450 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4451 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4452 }
4453
4454 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4455 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4456 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4457 {
4458 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4459 uNewCpl));
4460 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4461 }
4462
4463 /* Is it there? */
4464 if (!DescSS.Legacy.Gen.u1Present)
4465 {
4466 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4467 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4468 }
4469
4470 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4471 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4472
4473 /* Set the accessed bit before committing the result into SS. */
4474 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4475 {
4476 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4477 if (rcStrict != VINF_SUCCESS)
4478 return rcStrict;
4479 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4480 }
4481
4482 /* Commit SS. */
4483 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4484 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4485 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4486 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4487 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4488 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4489 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4490
4491 /* CPL has changed, update IEM before loading rest of segments. */
4492 pVCpu->iem.s.uCpl = uNewCpl;
4493
4494 /*
4495 * Load the data segments for the new task.
4496 */
4497 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4498 if (rcStrict != VINF_SUCCESS)
4499 return rcStrict;
4500 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4501 if (rcStrict != VINF_SUCCESS)
4502 return rcStrict;
4503 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4504 if (rcStrict != VINF_SUCCESS)
4505 return rcStrict;
4506 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4507 if (rcStrict != VINF_SUCCESS)
4508 return rcStrict;
4509
4510 /*
4511 * Load the code segment for the new task.
4512 */
4513 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4514 {
4515 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4516 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4517 }
4518
4519 /* Fetch the descriptor. */
4520 IEMSELDESC DescCS;
4521 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4522 if (rcStrict != VINF_SUCCESS)
4523 {
4524 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4525 return rcStrict;
4526 }
4527
4528 /* CS must be a code segment. */
4529 if ( !DescCS.Legacy.Gen.u1DescType
4530 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4531 {
4532 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4533 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4534 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4535 }
4536
4537 /* For conforming CS, DPL must be less than or equal to the RPL. */
4538 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4539 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4540 {
4541 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4542 DescCS.Legacy.Gen.u2Dpl));
4543 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4544 }
4545
4546 /* For non-conforming CS, DPL must match RPL. */
4547 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4548 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4549 {
4550        Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4551 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4552 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4553 }
4554
4555 /* Is it there? */
4556 if (!DescCS.Legacy.Gen.u1Present)
4557 {
4558 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4559 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4560 }
4561
4562 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4563 u64Base = X86DESC_BASE(&DescCS.Legacy);
4564
4565 /* Set the accessed bit before committing the result into CS. */
4566 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4567 {
4568 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4569 if (rcStrict != VINF_SUCCESS)
4570 return rcStrict;
4571 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4572 }
4573
4574 /* Commit CS. */
4575 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4576 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4577 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4578 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4579 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4580 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4581 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4582 }
4583
4584 /** @todo Debug trap. */
4585 if (fIsNewTSS386 && fNewDebugTrap)
4586 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4587
4588 /*
4589 * Construct the error code masks based on what caused this task switch.
4590 * See Intel Instruction reference for INT.
4591 */
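    /* uExt becomes the EXT bit of any error code pushed for faults raised during this
       task switch: 1 for externally generated events (hardware interrupts, exceptions,
       INT1/ICEBP), 0 for software interrupts (INT n). */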
4592 uint16_t uExt;
4593 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4594 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4595 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4596 {
4597 uExt = 1;
4598 }
4599 else
4600 uExt = 0;
4601
4602 /*
4603 * Push any error code on to the new stack.
4604 */
4605 if (fFlags & IEM_XCPT_FLAGS_ERR)
4606 {
4607 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4608 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4609 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4610
4611 /* Check that there is sufficient space on the stack. */
4612 /** @todo Factor out segment limit checking for normal/expand down segments
4613 * into a separate function. */
4614 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4615 {
4616 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4617 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4618 {
4619 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4620 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4621 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4622 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4623 }
4624 }
4625 else
4626 {
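            /* Expand-down segment: valid offsets lie above the limit, i.e. in the range
               (cbLimitSS, 0xffff] or (cbLimitSS, 0xffffffff] depending on the D/B bit. */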
4627 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4628 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4629 {
4630 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4631 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4632 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4633 }
4634 }
4635
4636
4637 if (fIsNewTSS386)
4638 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4639 else
4640 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4641 if (rcStrict != VINF_SUCCESS)
4642 {
4643 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4644 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4645 return rcStrict;
4646 }
4647 }
4648
4649 /* Check the new EIP against the new CS limit. */
4650 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4651 {
4652        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4653 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4654 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4655 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4656 }
4657
4658 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4659 pVCpu->cpum.GstCtx.ss.Sel));
4660 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4661}
4662
4663
4664/**
4665 * Implements exceptions and interrupts for protected mode.
4666 *
4667 * @returns VBox strict status code.
4668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4669 * @param cbInstr The number of bytes to offset rIP by in the return
4670 * address.
4671 * @param u8Vector The interrupt / exception vector number.
4672 * @param fFlags The flags.
4673 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4674 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4675 */
4676IEM_STATIC VBOXSTRICTRC
4677iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
4678 uint8_t cbInstr,
4679 uint8_t u8Vector,
4680 uint32_t fFlags,
4681 uint16_t uErr,
4682 uint64_t uCr2)
4683{
4684 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4685
4686 /*
4687 * Read the IDT entry.
4688 */
4689 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4690 {
4691 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4692 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4693 }
4694 X86DESC Idte;
4695 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4696 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4697 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4698 {
4699 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4700 return rcStrict;
4701 }
4702 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4703 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4704 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4705
4706 /*
4707 * Check the descriptor type, DPL and such.
4708 * ASSUMES this is done in the same order as described for call-gate calls.
4709 */
4710 if (Idte.Gate.u1DescType)
4711 {
4712 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4713 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4714 }
4715 bool fTaskGate = false;
4716 uint8_t f32BitGate = true;
4717 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
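    /* Note: the switch below adds X86_EFL_IF for interrupt gates; trap gates leave
       IF unchanged so the handler can itself be interrupted. */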
4718 switch (Idte.Gate.u4Type)
4719 {
4720 case X86_SEL_TYPE_SYS_UNDEFINED:
4721 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4722 case X86_SEL_TYPE_SYS_LDT:
4723 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4724 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4725 case X86_SEL_TYPE_SYS_UNDEFINED2:
4726 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4727 case X86_SEL_TYPE_SYS_UNDEFINED3:
4728 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4729 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4730 case X86_SEL_TYPE_SYS_UNDEFINED4:
4731 {
4732 /** @todo check what actually happens when the type is wrong...
4733 * esp. call gates. */
4734 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4735 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4736 }
4737
4738 case X86_SEL_TYPE_SYS_286_INT_GATE:
4739 f32BitGate = false;
4740 RT_FALL_THRU();
4741 case X86_SEL_TYPE_SYS_386_INT_GATE:
4742 fEflToClear |= X86_EFL_IF;
4743 break;
4744
4745 case X86_SEL_TYPE_SYS_TASK_GATE:
4746 fTaskGate = true;
4747#ifndef IEM_IMPLEMENTS_TASKSWITCH
4748 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4749#endif
4750 break;
4751
4752 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4753            f32BitGate = false;
                RT_FALL_THRU();
4754 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4755 break;
4756
4757 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4758 }
4759
4760 /* Check DPL against CPL if applicable. */
4761 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4762 {
4763 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4764 {
4765 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4766 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4767 }
4768 }
4769
4770 /* Is it there? */
4771 if (!Idte.Gate.u1Present)
4772 {
4773 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4774 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4775 }
4776
4777 /* Is it a task-gate? */
4778 if (fTaskGate)
4779 {
4780 /*
4781 * Construct the error code masks based on what caused this task switch.
4782 * See Intel Instruction reference for INT.
4783 */
4784 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4785 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4786 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4787 RTSEL SelTSS = Idte.Gate.u16Sel;
4788
4789 /*
4790 * Fetch the TSS descriptor in the GDT.
4791 */
4792 IEMSELDESC DescTSS;
4793 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4794 if (rcStrict != VINF_SUCCESS)
4795 {
4796 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4797 VBOXSTRICTRC_VAL(rcStrict)));
4798 return rcStrict;
4799 }
4800
4801 /* The TSS descriptor must be a system segment and be available (not busy). */
4802 if ( DescTSS.Legacy.Gen.u1DescType
4803 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4804 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4805 {
4806 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4807 u8Vector, SelTSS, DescTSS.Legacy.au64));
4808 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4809 }
4810
4811 /* The TSS must be present. */
4812 if (!DescTSS.Legacy.Gen.u1Present)
4813 {
4814 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4815 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4816 }
4817
4818 /* Do the actual task switch. */
4819 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4820 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4821 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4822 }
4823
4824 /* A null CS is bad. */
4825 RTSEL NewCS = Idte.Gate.u16Sel;
4826 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4827 {
4828 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4829 return iemRaiseGeneralProtectionFault0(pVCpu);
4830 }
4831
4832 /* Fetch the descriptor for the new CS. */
4833 IEMSELDESC DescCS;
4834 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4835 if (rcStrict != VINF_SUCCESS)
4836 {
4837 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4838 return rcStrict;
4839 }
4840
4841 /* Must be a code segment. */
4842 if (!DescCS.Legacy.Gen.u1DescType)
4843 {
4844 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4845 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4846 }
4847 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4848 {
4849 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4850 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4851 }
4852
4853 /* Don't allow lowering the privilege level. */
4854 /** @todo Does the lowering of privileges apply to software interrupts
4855 * only? This has bearings on the more-privileged or
4856 * same-privilege stack behavior further down. A testcase would
4857 * be nice. */
4858 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4859 {
4860 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4861 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4862 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4863 }
4864
4865 /* Make sure the selector is present. */
4866 if (!DescCS.Legacy.Gen.u1Present)
4867 {
4868 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4869 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4870 }
4871
4872 /* Check the new EIP against the new CS limit. */
4873 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4874 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4875 ? Idte.Gate.u16OffsetLow
4876 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4877 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4878 if (uNewEip > cbLimitCS)
4879 {
4880 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4881 u8Vector, uNewEip, cbLimitCS, NewCS));
4882 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4883 }
4884 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4885
4886 /* Calc the flag image to push. */
4887 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4888 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4889 fEfl &= ~X86_EFL_RF;
4890 else
4891 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4892
4893 /* From V8086 mode only go to CPL 0. */
4894 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4895 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4896 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4897 {
4898 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4899 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4900 }
4901
4902 /*
4903 * If the privilege level changes, we need to get a new stack from the TSS.
4904     * This in turn means validating the new SS and ESP...
4905 */
4906 if (uNewCpl != pVCpu->iem.s.uCpl)
4907 {
4908 RTSEL NewSS;
4909 uint32_t uNewEsp;
4910 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4911 if (rcStrict != VINF_SUCCESS)
4912 return rcStrict;
4913
4914 IEMSELDESC DescSS;
4915 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4916 if (rcStrict != VINF_SUCCESS)
4917 return rcStrict;
4918 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4919 if (!DescSS.Legacy.Gen.u1DefBig)
4920 {
4921 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4922 uNewEsp = (uint16_t)uNewEsp;
4923 }
4924
4925 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4926
4927 /* Check that there is sufficient space for the stack frame. */
4928 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
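        /* Frame contents: EIP, CS, EFLAGS, ESP, SS (+ optional error code), and for
           V8086 additionally ES, DS, FS, GS; each entry is 2 or 4 bytes depending on
           the gate size. */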
4929 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4930 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4931 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4932
4933 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4934 {
4935 if ( uNewEsp - 1 > cbLimitSS
4936 || uNewEsp < cbStackFrame)
4937 {
4938 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4939 u8Vector, NewSS, uNewEsp, cbStackFrame));
4940 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4941 }
4942 }
4943 else
4944 {
4945 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4946 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4947 {
4948 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4949 u8Vector, NewSS, uNewEsp, cbStackFrame));
4950 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4951 }
4952 }
4953
4954 /*
4955 * Start making changes.
4956 */
4957
4958 /* Set the new CPL so that stack accesses use it. */
4959 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4960 pVCpu->iem.s.uCpl = uNewCpl;
4961
4962 /* Create the stack frame. */
4963 RTPTRUNION uStackFrame;
4964 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4965 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4966 if (rcStrict != VINF_SUCCESS)
4967 return rcStrict;
4968 void * const pvStackFrame = uStackFrame.pv;
4969 if (f32BitGate)
4970 {
4971 if (fFlags & IEM_XCPT_FLAGS_ERR)
4972 *uStackFrame.pu32++ = uErr;
4973 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4974 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4975 uStackFrame.pu32[2] = fEfl;
4976 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4977 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4978 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4979 if (fEfl & X86_EFL_VM)
4980 {
4981 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4982 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4983 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4984 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
4985 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
4986 }
4987 }
4988 else
4989 {
4990 if (fFlags & IEM_XCPT_FLAGS_ERR)
4991 *uStackFrame.pu16++ = uErr;
4992 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
4993 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4994 uStackFrame.pu16[2] = fEfl;
4995 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
4996 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
4997 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
4998 if (fEfl & X86_EFL_VM)
4999 {
5000 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5001 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5002 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5003 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5004 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5005 }
5006 }
5007 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5008 if (rcStrict != VINF_SUCCESS)
5009 return rcStrict;
5010
5011 /* Mark the selectors 'accessed' (hope this is the correct time). */
5012    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5013 * after pushing the stack frame? (Write protect the gdt + stack to
5014 * find out.) */
5015 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5016 {
5017 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5018 if (rcStrict != VINF_SUCCESS)
5019 return rcStrict;
5020 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5021 }
5022
5023 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5024 {
5025 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5026 if (rcStrict != VINF_SUCCESS)
5027 return rcStrict;
5028 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5029 }
5030
5031 /*
5032     * Start committing the register changes (joins with the DPL=CPL branch).
5033 */
5034 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5035 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5036 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5037 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5038 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5039 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5040 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5041 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5042 * SP is loaded).
5043 * Need to check the other combinations too:
5044 * - 16-bit TSS, 32-bit handler
5045 * - 32-bit TSS, 16-bit handler */
5046 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5047 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5048 else
5049 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5050
5051 if (fEfl & X86_EFL_VM)
5052 {
5053 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5054 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5055 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5056 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5057 }
5058 }
5059 /*
5060 * Same privilege, no stack change and smaller stack frame.
5061 */
5062 else
5063 {
5064 uint64_t uNewRsp;
5065 RTPTRUNION uStackFrame;
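        /* Same-privilege frame: EIP, CS and EFLAGS plus an optional error code; entries
           are 2 or 4 bytes depending on the gate size (hence 6/8 shifted by f32BitGate). */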
5066 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5067 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5068 if (rcStrict != VINF_SUCCESS)
5069 return rcStrict;
5070 void * const pvStackFrame = uStackFrame.pv;
5071
5072 if (f32BitGate)
5073 {
5074 if (fFlags & IEM_XCPT_FLAGS_ERR)
5075 *uStackFrame.pu32++ = uErr;
5076 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5077 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5078 uStackFrame.pu32[2] = fEfl;
5079 }
5080 else
5081 {
5082 if (fFlags & IEM_XCPT_FLAGS_ERR)
5083 *uStackFrame.pu16++ = uErr;
5084 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5085 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5086 uStackFrame.pu16[2] = fEfl;
5087 }
5088 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5089 if (rcStrict != VINF_SUCCESS)
5090 return rcStrict;
5091
5092 /* Mark the CS selector as 'accessed'. */
5093 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5094 {
5095 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5096 if (rcStrict != VINF_SUCCESS)
5097 return rcStrict;
5098 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5099 }
5100
5101 /*
5102 * Start committing the register changes (joins with the other branch).
5103 */
5104 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5105 }
5106
5107 /* ... register committing continues. */
5108 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5109 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5110 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5111 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5112 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5113 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5114
5115 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5116 fEfl &= ~fEflToClear;
5117 IEMMISC_SET_EFL(pVCpu, fEfl);
5118
5119 if (fFlags & IEM_XCPT_FLAGS_CR2)
5120 pVCpu->cpum.GstCtx.cr2 = uCr2;
5121
5122 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5123 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5124
5125 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5126}
5127
5128
5129/**
5130 * Implements exceptions and interrupts for long mode.
5131 *
5132 * @returns VBox strict status code.
5133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5134 * @param cbInstr The number of bytes to offset rIP by in the return
5135 * address.
5136 * @param u8Vector The interrupt / exception vector number.
5137 * @param fFlags The flags.
5138 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5139 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5140 */
5141IEM_STATIC VBOXSTRICTRC
5142iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
5143 uint8_t cbInstr,
5144 uint8_t u8Vector,
5145 uint32_t fFlags,
5146 uint16_t uErr,
5147 uint64_t uCr2)
5148{
5149 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5150
5151 /*
5152 * Read the IDT entry.
5153 */
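    /* Note: in long mode each IDT entry is 16 bytes, hence the shift by 4 below. */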
5154 uint16_t offIdt = (uint16_t)u8Vector << 4;
5155 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5156 {
5157 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5158 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5159 }
5160 X86DESC64 Idte;
5161#ifdef _MSC_VER /* Shut up silly compiler warning. */
5162 Idte.au64[0] = 0;
5163 Idte.au64[1] = 0;
5164#endif
5165 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5166 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5167 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5168 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5169 {
5170 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5171 return rcStrict;
5172 }
5173 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5174 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5175 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5176
5177 /*
5178 * Check the descriptor type, DPL and such.
5179 * ASSUMES this is done in the same order as described for call-gate calls.
5180 */
5181 if (Idte.Gate.u1DescType)
5182 {
5183 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5184 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5185 }
5186 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5187 switch (Idte.Gate.u4Type)
5188 {
5189 case AMD64_SEL_TYPE_SYS_INT_GATE:
5190 fEflToClear |= X86_EFL_IF;
5191 break;
5192 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5193 break;
5194
5195 default:
5196 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5197 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5198 }
5199
5200 /* Check DPL against CPL if applicable. */
5201 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5202 {
5203 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5204 {
5205 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5206 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5207 }
5208 }
5209
5210 /* Is it there? */
5211 if (!Idte.Gate.u1Present)
5212 {
5213 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5214 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5215 }
5216
5217 /* A null CS is bad. */
5218 RTSEL NewCS = Idte.Gate.u16Sel;
5219 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5220 {
5221 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5222 return iemRaiseGeneralProtectionFault0(pVCpu);
5223 }
5224
5225 /* Fetch the descriptor for the new CS. */
5226 IEMSELDESC DescCS;
5227 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5228 if (rcStrict != VINF_SUCCESS)
5229 {
5230 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5231 return rcStrict;
5232 }
5233
5234 /* Must be a 64-bit code segment. */
5235 if (!DescCS.Long.Gen.u1DescType)
5236 {
5237 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5238 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5239 }
5240 if ( !DescCS.Long.Gen.u1Long
5241 || DescCS.Long.Gen.u1DefBig
5242 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5243 {
5244 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5245 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5246 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5247 }
5248
5249 /* Don't allow lowering the privilege level. For non-conforming CS
5250 selectors, the CS.DPL sets the privilege level the trap/interrupt
5251 handler runs at. For conforming CS selectors, the CPL remains
5252 unchanged, but the CS.DPL must be <= CPL. */
5253 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5254 * when CPU in Ring-0. Result \#GP? */
5255 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5256 {
5257 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5258 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5259 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5260 }
5261
5262
5263 /* Make sure the selector is present. */
5264 if (!DescCS.Legacy.Gen.u1Present)
5265 {
5266 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5267 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5268 }
5269
5270 /* Check that the new RIP is canonical. */
5271 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5272 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5273 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5274 if (!IEM_IS_CANONICAL(uNewRip))
5275 {
5276 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5277 return iemRaiseGeneralProtectionFault0(pVCpu);
5278 }
5279
5280 /*
5281 * If the privilege level changes or if the IST isn't zero, we need to get
5282 * a new stack from the TSS.
5283 */
5284 uint64_t uNewRsp;
5285 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5286 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5287 if ( uNewCpl != pVCpu->iem.s.uCpl
5288 || Idte.Gate.u3IST != 0)
5289 {
5290 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5291 if (rcStrict != VINF_SUCCESS)
5292 return rcStrict;
5293 }
5294 else
5295 uNewRsp = pVCpu->cpum.GstCtx.rsp;
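    /* The new stack pointer is aligned down to a 16-byte boundary before the frame
       is pushed, as required for 64-bit mode event delivery. */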
5296 uNewRsp &= ~(uint64_t)0xf;
5297
5298 /*
5299 * Calc the flag image to push.
5300 */
5301 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5302 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5303 fEfl &= ~X86_EFL_RF;
5304 else
5305 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5306
5307 /*
5308 * Start making changes.
5309 */
5310 /* Set the new CPL so that stack accesses use it. */
5311 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5312 pVCpu->iem.s.uCpl = uNewCpl;
5313
5314 /* Create the stack frame. */
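    /* The 64-bit frame always contains SS:RSP, RFLAGS and CS:RIP (5 qwords), plus an
       optional error code qword - even when the privilege level does not change. */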
5315 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5316 RTPTRUNION uStackFrame;
5317 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5318 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5319 if (rcStrict != VINF_SUCCESS)
5320 return rcStrict;
5321 void * const pvStackFrame = uStackFrame.pv;
5322
5323 if (fFlags & IEM_XCPT_FLAGS_ERR)
5324 *uStackFrame.pu64++ = uErr;
5325 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5326 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5327 uStackFrame.pu64[2] = fEfl;
5328 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5329 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5330 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5331 if (rcStrict != VINF_SUCCESS)
5332 return rcStrict;
5333
5334    /* Mark the CS selector 'accessed' (hope this is the correct time). */
5335    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5336 * after pushing the stack frame? (Write protect the gdt + stack to
5337 * find out.) */
5338 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5339 {
5340 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5341 if (rcStrict != VINF_SUCCESS)
5342 return rcStrict;
5343 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5344 }
5345
5346 /*
5347     * Start committing the register changes.
5348 */
5349 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5350 * hidden registers when interrupting 32-bit or 16-bit code! */
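    /* On a privilege change in 64-bit mode, SS is loaded with a NULL selector carrying
       the new RPL and the cached descriptor is flagged unusable. */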
5351 if (uNewCpl != uOldCpl)
5352 {
5353 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5354 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5355 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5356 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5357 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5358 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5359 }
5360 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5361 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5362 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5363 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5364 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5365 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5366 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5367 pVCpu->cpum.GstCtx.rip = uNewRip;
5368
5369 fEfl &= ~fEflToClear;
5370 IEMMISC_SET_EFL(pVCpu, fEfl);
5371
5372 if (fFlags & IEM_XCPT_FLAGS_CR2)
5373 pVCpu->cpum.GstCtx.cr2 = uCr2;
5374
5375 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5376 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5377
5378 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5379}
5380
5381
5382/**
5383 * Implements exceptions and interrupts.
5384 *
5385 * All exceptions and interrupts go through this function!
5386 *
5387 * @returns VBox strict status code.
5388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5389 * @param cbInstr The number of bytes to offset rIP by in the return
5390 * address.
5391 * @param u8Vector The interrupt / exception vector number.
5392 * @param fFlags The flags.
5393 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5394 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5395 */
5396DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5397iemRaiseXcptOrInt(PVMCPUCC pVCpu,
5398 uint8_t cbInstr,
5399 uint8_t u8Vector,
5400 uint32_t fFlags,
5401 uint16_t uErr,
5402 uint64_t uCr2)
5403{
5404 /*
5405 * Get all the state that we might need here.
5406 */
5407 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5408 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5409
5410#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5411 /*
5412 * Flush prefetch buffer
5413 */
5414 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5415#endif
5416
5417 /*
5418 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5419 */
5420 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5421 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5422 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5423 | IEM_XCPT_FLAGS_BP_INSTR
5424 | IEM_XCPT_FLAGS_ICEBP_INSTR
5425 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5426 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5427 {
5428 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5429 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5430 u8Vector = X86_XCPT_GP;
5431 uErr = 0;
5432 }
5433#ifdef DBGFTRACE_ENABLED
5434 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5435 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5436 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5437#endif
5438
5439 /*
5440 * Evaluate whether NMI blocking should be in effect.
5441 * Normally, NMI blocking is in effect whenever we inject an NMI.
5442 */
5443 bool fBlockNmi;
5444 if ( u8Vector == X86_XCPT_NMI
5445 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5446 fBlockNmi = true;
5447 else
5448 fBlockNmi = false;
5449
5450#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5451 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5452 {
5453 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5454 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5455 return rcStrict0;
5456
5457 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5458 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5459 {
5460 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5461 fBlockNmi = false;
5462 }
5463 }
5464#endif
5465
5466#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5467 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5468 {
5469 /*
5470 * If the event is being injected as part of VMRUN, it isn't subject to event
5471 * intercepts in the nested-guest. However, secondary exceptions that occur
5472 * during injection of any event -are- subject to exception intercepts.
5473 *
5474 * See AMD spec. 15.20 "Event Injection".
5475 */
5476 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5477 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5478 else
5479 {
5480 /*
5481 * Check and handle if the event being raised is intercepted.
5482 */
5483 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5484 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5485 return rcStrict0;
5486 }
5487 }
5488#endif
5489
5490 /*
5491 * Set NMI blocking if necessary.
5492 */
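    /* Note: architecturally this NMI blocking stays in effect until the next IRET. */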
5493 if ( fBlockNmi
5494 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5495 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5496
5497 /*
5498 * Do recursion accounting.
5499 */
5500 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5501 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5502 if (pVCpu->iem.s.cXcptRecursions == 0)
5503 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5504 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5505 else
5506 {
5507 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5508 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5509 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5510
5511 if (pVCpu->iem.s.cXcptRecursions >= 4)
5512 {
5513#ifdef DEBUG_bird
5514 AssertFailed();
5515#endif
5516 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5517 }
5518
5519 /*
5520 * Evaluate the sequence of recurring events.
5521 */
5522 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5523 NULL /* pXcptRaiseInfo */);
5524 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5525 { /* likely */ }
5526 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5527 {
5528 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5529 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5530 u8Vector = X86_XCPT_DF;
5531 uErr = 0;
5532#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5533 /* VMX nested-guest #DF intercept needs to be checked here. */
5534 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5535 {
5536 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5537 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5538 return rcStrict0;
5539 }
5540#endif
5541 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5542 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5543 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5544 }
5545 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5546 {
5547 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5548 return iemInitiateCpuShutdown(pVCpu);
5549 }
5550 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5551 {
5552 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5553 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5554 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5555 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5556 return VERR_EM_GUEST_CPU_HANG;
5557 }
5558 else
5559 {
5560 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5561 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5562 return VERR_IEM_IPE_9;
5563 }
5564
5565 /*
5566         * The 'EXT' bit is set when an exception occurs during delivery of an external
5567         * event (such as an interrupt or an earlier exception)[1]. The privileged software
5568         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5569         * interrupts and the INTO/INT3 instructions, the 'EXT' bit is not set[3].
5570 *
5571 * [1] - Intel spec. 6.13 "Error Code"
5572 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5573 * [3] - Intel Instruction reference for INT n.
5574 */
5575 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5576 && (fFlags & IEM_XCPT_FLAGS_ERR)
5577 && u8Vector != X86_XCPT_PF
5578 && u8Vector != X86_XCPT_DF)
5579 {
5580 uErr |= X86_TRAP_ERR_EXTERNAL;
5581 }
5582 }
5583
5584 pVCpu->iem.s.cXcptRecursions++;
5585 pVCpu->iem.s.uCurXcpt = u8Vector;
5586 pVCpu->iem.s.fCurXcpt = fFlags;
5587 pVCpu->iem.s.uCurXcptErr = uErr;
5588 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5589
5590 /*
5591 * Extensive logging.
5592 */
5593#if defined(LOG_ENABLED) && defined(IN_RING3)
5594 if (LogIs3Enabled())
5595 {
5596 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5597 PVM pVM = pVCpu->CTX_SUFF(pVM);
5598 char szRegs[4096];
5599 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5600 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5601 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5602 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5603 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5604 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5605 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5606 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5607 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5608 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5609 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5610 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5611 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5612 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5613 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5614 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5615 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5616 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5617 " efer=%016VR{efer}\n"
5618 " pat=%016VR{pat}\n"
5619 " sf_mask=%016VR{sf_mask}\n"
5620 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5621 " lstar=%016VR{lstar}\n"
5622 " star=%016VR{star} cstar=%016VR{cstar}\n"
5623 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5624 );
5625
5626 char szInstr[256];
5627 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5628 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5629 szInstr, sizeof(szInstr), NULL);
5630 Log3(("%s%s\n", szRegs, szInstr));
5631 }
5632#endif /* LOG_ENABLED */
5633
5634 /*
5635 * Call the mode specific worker function.
5636 */
5637 VBOXSTRICTRC rcStrict;
5638 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5639 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5640 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5641 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5642 else
5643 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5644
5645 /* Flush the prefetch buffer. */
5646#ifdef IEM_WITH_CODE_TLB
5647 pVCpu->iem.s.pbInstrBuf = NULL;
5648#else
5649 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5650#endif
5651
5652 /*
5653 * Unwind.
5654 */
5655 pVCpu->iem.s.cXcptRecursions--;
5656 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5657 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5658 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5659 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5660 pVCpu->iem.s.cXcptRecursions + 1));
5661 return rcStrict;
5662}
5663
5664#ifdef IEM_WITH_SETJMP
5665/**
5666 * See iemRaiseXcptOrInt. Will not return.
5667 */
5668IEM_STATIC DECL_NO_RETURN(void)
5669iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
5670 uint8_t cbInstr,
5671 uint8_t u8Vector,
5672 uint32_t fFlags,
5673 uint16_t uErr,
5674 uint64_t uCr2)
5675{
5676 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5677 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5678}
5679#endif
5680
5681
5682/** \#DE - 00. */
5683DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPUCC pVCpu)
5684{
5685 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5686}
5687
5688
5689/** \#DB - 01.
5690 * @note This automatically clears DR7.GD. */
5691DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPUCC pVCpu)
5692{
5693 /** @todo set/clear RF. */
5694 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5695 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5696}
5697
5698
5699/** \#BR - 05. */
5700DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu)
5701{
5702 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5703}
5704
5705
5706/** \#UD - 06. */
5707DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPUCC pVCpu)
5708{
5709 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5710}
5711
5712
5713/** \#NM - 07. */
5714DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu)
5715{
5716 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5717}
5718
5719
5720/** \#TS(err) - 0a. */
5721DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5722{
5723 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5724}
5725
5726
5727/** \#TS(tr) - 0a. */
5728DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu)
5729{
5730 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5731 pVCpu->cpum.GstCtx.tr.Sel, 0);
5732}
5733
5734
5735/** \#TS(0) - 0a. */
5736DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu)
5737{
5738 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5739 0, 0);
5740}
5741
5742
5743/** \#TS(sel) - 0a. */
5744DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5745{
5746 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5747 uSel & X86_SEL_MASK_OFF_RPL, 0);
5748}
5749
5750
5751/** \#NP(err) - 0b. */
5752DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5753{
5754 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5755}
5756
5757
5758/** \#NP(sel) - 0b. */
5759DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5760{
5761 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5762 uSel & ~X86_SEL_RPL, 0);
5763}
5764
5765
5766/** \#SS(seg) - 0c. */
5767DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5768{
5769 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5770 uSel & ~X86_SEL_RPL, 0);
5771}
5772
5773
5774/** \#SS(err) - 0c. */
5775DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5776{
5777 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5778}
5779
5780
5781/** \#GP(n) - 0d. */
5782DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr)
5783{
5784 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5785}
5786
5787
5788/** \#GP(0) - 0d. */
5789DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu)
5790{
5791 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5792}
5793
5794#ifdef IEM_WITH_SETJMP
5795/** \#GP(0) - 0d. */
5796DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu)
5797{
5798 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5799}
5800#endif
5801
5802
5803/** \#GP(sel) - 0d. */
5804DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5805{
5806 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5807 Sel & ~X86_SEL_RPL, 0);
5808}
5809
5810
5811/** \#GP(0) - 0d. */
5812DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPUCC pVCpu)
5813{
5814 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5815}
5816
5817
5818/** \#GP(sel) - 0d. */
5819DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5820{
5821 NOREF(iSegReg); NOREF(fAccess);
5822 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5823 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5824}
5825
5826#ifdef IEM_WITH_SETJMP
5827/** \#GP(sel) - 0d, longjmp. */
5828DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5829{
5830 NOREF(iSegReg); NOREF(fAccess);
5831 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5832 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5833}
5834#endif
5835
5836/** \#GP(sel) - 0d. */
5837DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5838{
5839 NOREF(Sel);
5840 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5841}
5842
5843#ifdef IEM_WITH_SETJMP
5844/** \#GP(sel) - 0d, longjmp. */
5845DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel)
5846{
5847 NOREF(Sel);
5848 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5849}
5850#endif
5851
5852
5853/** \#GP(sel) - 0d. */
5854DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5855{
5856 NOREF(iSegReg); NOREF(fAccess);
5857 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5858}
5859
5860#ifdef IEM_WITH_SETJMP
5861/** \#GP(sel) - 0d, longjmp. */
5862DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg,
5863 uint32_t fAccess)
5864{
5865 NOREF(iSegReg); NOREF(fAccess);
5866 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5867}
5868#endif
5869
5870
5871/** \#PF(n) - 0e. */
5872DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5873{
5874 uint16_t uErr;
5875 switch (rc)
5876 {
5877 case VERR_PAGE_NOT_PRESENT:
5878 case VERR_PAGE_TABLE_NOT_PRESENT:
5879 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5880 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5881 uErr = 0;
5882 break;
5883
5884 default:
5885 AssertMsgFailed(("%Rrc\n", rc));
5886 RT_FALL_THRU();
5887 case VERR_ACCESS_DENIED:
5888 uErr = X86_TRAP_PF_P;
5889 break;
5890
5891 /** @todo reserved */
5892 }
5893
5894 if (pVCpu->iem.s.uCpl == 3)
5895 uErr |= X86_TRAP_PF_US;
5896
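    /* The instruction-fetch (I/D) bit is only reported when no-execute paging is in
       effect, i.e. PAE (or long mode) paging with EFER.NXE set - see the check below. */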
5897 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5898 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5899 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5900 uErr |= X86_TRAP_PF_ID;
5901
5902#if 0 /* This is so much non-sense, really. Why was it done like that? */
5903 /* Note! RW access callers reporting a WRITE protection fault, will clear
5904 the READ flag before calling. So, read-modify-write accesses (RW)
5905 can safely be reported as READ faults. */
5906 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5907 uErr |= X86_TRAP_PF_RW;
5908#else
5909 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5910 {
5911 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
5912 /// (regardless of outcome of the comparison in the latter case).
5913 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
5914 uErr |= X86_TRAP_PF_RW;
5915 }
5916#endif
5917
5918 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5919 uErr, GCPtrWhere);
5920}
5921
5922#ifdef IEM_WITH_SETJMP
5923/** \#PF(n) - 0e, longjmp. */
5924IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5925{
5926 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5927}
5928#endif
5929
5930
5931/** \#MF(0) - 10. */
5932DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPUCC pVCpu)
5933{
5934 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5935}
5936
5937
5938/** \#AC(0) - 11. */
5939DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
5940{
5941 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5942}
5943
5944
5945/**
5946 * Macro for calling iemCImplRaiseDivideError().
5947 *
5948 * This enables us to add/remove arguments and force different levels of
5949 * inlining as we wish.
5950 *
5951 * @return Strict VBox status code.
5952 */
5953#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5954IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5955{
5956 NOREF(cbInstr);
5957 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5958}
5959
5960
5961/**
5962 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5963 *
5964 * This enables us to add/remove arguments and force different levels of
5965 * inlining as we wish.
5966 *
5967 * @return Strict VBox status code.
5968 */
5969#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5970IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5971{
5972 NOREF(cbInstr);
5973 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5974}
5975
5976
5977/**
5978 * Macro for calling iemCImplRaiseInvalidOpcode().
5979 *
5980 * This enables us to add/remove arguments and force different levels of
5981 * inlining as we wish.
5982 *
5983 * @return Strict VBox status code.
5984 */
5985#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
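/* Typical use in an opcode decoder function (illustrative): return IEMOP_RAISE_INVALID_OPCODE(); */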
5986IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5987{
5988 NOREF(cbInstr);
5989 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5990}
5991
5992
5993/** @} */
5994
5995
5996/*
5997 *
5998 * Helper routines.
5999 * Helper routines.
6000 * Helper routines.
6001 *
6002 */
6003
6004/**
6005 * Recalculates the effective operand size.
6006 *
6007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6008 */
6009IEM_STATIC void iemRecalEffOpSize(PVMCPUCC pVCpu)
6010{
6011 switch (pVCpu->iem.s.enmCpuMode)
6012 {
6013 case IEMMODE_16BIT:
6014 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6015 break;
6016 case IEMMODE_32BIT:
6017 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6018 break;
6019 case IEMMODE_64BIT:
6020 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6021 {
6022 case 0:
6023 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6024 break;
6025 case IEM_OP_PRF_SIZE_OP:
6026 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6027 break;
6028 case IEM_OP_PRF_SIZE_REX_W:
6029 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6030 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6031 break;
6032 }
6033 break;
6034 default:
6035 AssertFailed();
6036 }
6037}
6038
6039
6040/**
6041 * Sets the default operand size to 64-bit and recalculates the effective
6042 * operand size.
6043 *
6044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6045 */
6046IEM_STATIC void iemRecalEffOpSize64Default(PVMCPUCC pVCpu)
6047{
6048 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6049 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6050 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6051 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6052 else
6053 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6054}
6055
6056
6057/*
6058 *
6059 * Common opcode decoders.
6060 * Common opcode decoders.
6061 * Common opcode decoders.
6062 *
6063 */
6064//#include <iprt/mem.h>
6065
6066/**
6067 * Used to add extra details about a stub case.
6068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6069 */
6070IEM_STATIC void iemOpStubMsg2(PVMCPUCC pVCpu)
6071{
6072#if defined(LOG_ENABLED) && defined(IN_RING3)
6073 PVM pVM = pVCpu->CTX_SUFF(pVM);
6074 char szRegs[4096];
6075 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6076 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6077 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6078 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6079 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6080 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6081 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6082 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6083 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6084 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6085 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6086 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6087 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6088 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6089 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6090 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6091 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6092 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6093 " efer=%016VR{efer}\n"
6094 " pat=%016VR{pat}\n"
6095 " sf_mask=%016VR{sf_mask}\n"
6096 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6097 " lstar=%016VR{lstar}\n"
6098 " star=%016VR{star} cstar=%016VR{cstar}\n"
6099 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6100 );
6101
6102 char szInstr[256];
6103 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6104 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6105 szInstr, sizeof(szInstr), NULL);
6106
6107 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6108#else
6109 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
6110#endif
6111}
6112
6113/**
6114 * Complains about a stub.
6115 *
6116 * Providing two versions of this macro, one for daily use and one for use when
6117 * working on IEM.
6118 */
6119#if 0
6120# define IEMOP_BITCH_ABOUT_STUB() \
6121 do { \
6122 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6123 iemOpStubMsg2(pVCpu); \
6124 RTAssertPanic(); \
6125 } while (0)
6126#else
6127# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6128#endif
6129
6130/** Stubs an opcode. */
6131#define FNIEMOP_STUB(a_Name) \
6132 FNIEMOP_DEF(a_Name) \
6133 { \
6134 RT_NOREF_PV(pVCpu); \
6135 IEMOP_BITCH_ABOUT_STUB(); \
6136 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6137 } \
6138 typedef int ignore_semicolon
6139
6140/** Stubs an opcode. */
6141#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6142 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6143 { \
6144 RT_NOREF_PV(pVCpu); \
6145 RT_NOREF_PV(a_Name0); \
6146 IEMOP_BITCH_ABOUT_STUB(); \
6147 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6148 } \
6149 typedef int ignore_semicolon
6150
6151/** Stubs an opcode which currently should raise \#UD. */
6152#define FNIEMOP_UD_STUB(a_Name) \
6153 FNIEMOP_DEF(a_Name) \
6154 { \
6155 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6156 return IEMOP_RAISE_INVALID_OPCODE(); \
6157 } \
6158 typedef int ignore_semicolon
6159
6160/** Stubs an opcode which currently should raise \#UD. */
6161#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6162 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6163 { \
6164 RT_NOREF_PV(pVCpu); \
6165 RT_NOREF_PV(a_Name0); \
6166 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6167 return IEMOP_RAISE_INVALID_OPCODE(); \
6168 } \
6169 typedef int ignore_semicolon
6170
6171
6172
6173/** @name Register Access.
6174 * @{
6175 */
6176
6177/**
6178 * Gets a reference (pointer) to the specified hidden segment register.
6179 *
6180 * @returns Hidden register reference.
6181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6182 * @param iSegReg The segment register.
6183 */
6184IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg)
6185{
6186 Assert(iSegReg < X86_SREG_COUNT);
6187 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6188 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6189
6190 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6191 return pSReg;
6192}
6193
6194
6195/**
6196 * Ensures that the given hidden segment register is up to date.
6197 *
6198 * @returns Hidden register reference.
6199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6200 * @param pSReg The segment register.
6201 */
6202IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
6203{
6204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6205 NOREF(pVCpu);
6206 return pSReg;
6207}
6208
6209
6210/**
6211 * Gets a reference (pointer) to the specified segment register (the selector
6212 * value).
6213 *
6214 * @returns Pointer to the selector variable.
6215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6216 * @param iSegReg The segment register.
6217 */
6218DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg)
6219{
6220 Assert(iSegReg < X86_SREG_COUNT);
6221 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6222 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6223}
6224
6225
6226/**
6227 * Fetches the selector value of a segment register.
6228 *
6229 * @returns The selector value.
6230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6231 * @param iSegReg The segment register.
6232 */
6233DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg)
6234{
6235 Assert(iSegReg < X86_SREG_COUNT);
6236 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6237 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6238}
6239
6240
6241/**
6242 * Fetches the base address value of a segment register.
6243 *
6244 * @returns The base address value.
6245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6246 * @param iSegReg The segment register.
6247 */
6248DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6249{
6250 Assert(iSegReg < X86_SREG_COUNT);
6251 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6252 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6253}
6254
6255
6256/**
6257 * Gets a reference (pointer) to the specified general purpose register.
6258 *
6259 * @returns Register reference.
6260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6261 * @param iReg The general purpose register.
6262 */
6263DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg)
6264{
6265 Assert(iReg < 16);
6266 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6267}
6268
6269
6270/**
6271 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6272 *
6273 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6274 *
6275 * @returns Register reference.
6276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6277 * @param iReg The register.
6278 */
6279DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg)
6280{
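    /* With any REX prefix, encodings 0..15 address the low byte of the GPR (AL..R15B);
       without REX, encodings 4..7 select the high-byte registers AH, CH, DH and BH. */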
6281 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6282 {
6283 Assert(iReg < 16);
6284 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6285 }
6286 /* high 8-bit register. */
6287 Assert(iReg < 8);
6288 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6289}
6290
6291
6292/**
6293 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6294 *
6295 * @returns Register reference.
6296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6297 * @param iReg The register.
6298 */
6299DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg)
6300{
6301 Assert(iReg < 16);
6302 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6303}
6304
6305
6306/**
6307 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6308 *
6309 * @returns Register reference.
6310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6311 * @param iReg The register.
6312 */
6313DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg)
6314{
6315 Assert(iReg < 16);
6316 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6317}
6318
6319
6320/**
6321 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6322 *
6323 * @returns Register reference.
6324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6325 * @param iReg The register.
6326 */
6327DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg)
6328{
6329 Assert(iReg < 16);
6330 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6331}
6332
6333
6334/**
6335 * Gets a reference (pointer) to the specified segment register's base address.
6336 *
6337 * @returns Segment register base address reference.
6338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6339 * @param iSegReg The segment selector.
6340 */
6341DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6342{
6343 Assert(iSegReg < X86_SREG_COUNT);
6344 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6345 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6346}
6347
6348
6349/**
6350 * Fetches the value of an 8-bit general purpose register.
6351 *
6352 * @returns The register value.
6353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6354 * @param iReg The register.
6355 */
6356DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg)
6357{
6358 return *iemGRegRefU8(pVCpu, iReg);
6359}
6360
6361
6362/**
6363 * Fetches the value of a 16-bit general purpose register.
6364 *
6365 * @returns The register value.
6366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6367 * @param iReg The register.
6368 */
6369DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg)
6370{
6371 Assert(iReg < 16);
6372 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6373}
6374
6375
6376/**
6377 * Fetches the value of a 32-bit general purpose register.
6378 *
6379 * @returns The register value.
6380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6381 * @param iReg The register.
6382 */
6383DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg)
6384{
6385 Assert(iReg < 16);
6386 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6387}
6388
6389
6390/**
6391 * Fetches the value of a 64-bit general purpose register.
6392 *
6393 * @returns The register value.
6394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6395 * @param iReg The register.
6396 */
6397DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg)
6398{
6399 Assert(iReg < 16);
6400 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6401}
6402
6403
6404/**
6405 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6406 *
6407 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6408 * segment limit.
6409 *
6410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6411 * @param offNextInstr The offset of the next instruction.
6412 */
6413IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr)
6414{
6415 switch (pVCpu->iem.s.enmEffOpSize)
6416 {
6417 case IEMMODE_16BIT:
6418 {
6419 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6420 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6421 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6422 return iemRaiseGeneralProtectionFault0(pVCpu);
6423 pVCpu->cpum.GstCtx.rip = uNewIp;
6424 break;
6425 }
6426
6427 case IEMMODE_32BIT:
6428 {
6429 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6430 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6431
6432 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6433 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6434 return iemRaiseGeneralProtectionFault0(pVCpu);
6435 pVCpu->cpum.GstCtx.rip = uNewEip;
6436 break;
6437 }
6438
6439 case IEMMODE_64BIT:
6440 {
6441 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6442
6443 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6444 if (!IEM_IS_CANONICAL(uNewRip))
6445 return iemRaiseGeneralProtectionFault0(pVCpu);
6446 pVCpu->cpum.GstCtx.rip = uNewRip;
6447 break;
6448 }
6449
6450 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6451 }
6452
6453 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6454
6455#ifndef IEM_WITH_CODE_TLB
6456 /* Flush the prefetch buffer. */
6457 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6458#endif
6459
6460 return VINF_SUCCESS;
6461}
6462
6463
6464/**
6465 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6466 *
6467 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6468 * segment limit.
6469 *
6470 * @returns Strict VBox status code.
6471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6472 * @param offNextInstr The offset of the next instruction.
6473 */
6474IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr)
6475{
6476 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6477
6478 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6479 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6480 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6481 return iemRaiseGeneralProtectionFault0(pVCpu);
6482 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6483 pVCpu->cpum.GstCtx.rip = uNewIp;
6484 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6485
6486#ifndef IEM_WITH_CODE_TLB
6487 /* Flush the prefetch buffer. */
6488 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6489#endif
6490
6491 return VINF_SUCCESS;
6492}
6493
6494
6495/**
6496 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6497 *
6498 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6499 * segment limit.
6500 *
6501 * @returns Strict VBox status code.
6502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6503 * @param offNextInstr The offset of the next instruction.
6504 */
6505IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr)
6506{
6507 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6508
6509 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6510 {
6511 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6512
6513 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6514 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6515 return iemRaiseGeneralProtectionFault0(pVCpu);
6516 pVCpu->cpum.GstCtx.rip = uNewEip;
6517 }
6518 else
6519 {
6520 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6521
6522 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6523 if (!IEM_IS_CANONICAL(uNewRip))
6524 return iemRaiseGeneralProtectionFault0(pVCpu);
6525 pVCpu->cpum.GstCtx.rip = uNewRip;
6526 }
6527 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6528
6529#ifndef IEM_WITH_CODE_TLB
6530 /* Flush the prefetch buffer. */
6531 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6532#endif
6533
6534 return VINF_SUCCESS;
6535}
6536
6537
6538/**
6539 * Performs a near jump to the specified address.
6540 *
6541 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6542 * segment limit.
6543 *
6544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6545 * @param uNewRip The new RIP value.
6546 */
6547IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip)
6548{
6549 switch (pVCpu->iem.s.enmEffOpSize)
6550 {
6551 case IEMMODE_16BIT:
6552 {
6553 Assert(uNewRip <= UINT16_MAX);
6554 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6555 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6556 return iemRaiseGeneralProtectionFault0(pVCpu);
6557 /** @todo Test 16-bit jump in 64-bit mode. */
6558 pVCpu->cpum.GstCtx.rip = uNewRip;
6559 break;
6560 }
6561
6562 case IEMMODE_32BIT:
6563 {
6564 Assert(uNewRip <= UINT32_MAX);
6565 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6566 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6567
6568 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6569 return iemRaiseGeneralProtectionFault0(pVCpu);
6570 pVCpu->cpum.GstCtx.rip = uNewRip;
6571 break;
6572 }
6573
6574 case IEMMODE_64BIT:
6575 {
6576 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6577
6578 if (!IEM_IS_CANONICAL(uNewRip))
6579 return iemRaiseGeneralProtectionFault0(pVCpu);
6580 pVCpu->cpum.GstCtx.rip = uNewRip;
6581 break;
6582 }
6583
6584 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6585 }
6586
6587 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6588
6589#ifndef IEM_WITH_CODE_TLB
6590 /* Flush the prefetch buffer. */
6591 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6592#endif
6593
6594 return VINF_SUCCESS;
6595}
6596
6597
6598/**
6599 * Gets the address of the top of the stack.
6600 *
6601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6602 */
6603DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6604{
6605 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6606 return pVCpu->cpum.GstCtx.rsp;
6607 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6608 return pVCpu->cpum.GstCtx.esp;
6609 return pVCpu->cpum.GstCtx.sp;
6610}
6611
6612
6613/**
6614 * Updates the RIP/EIP/IP to point to the next instruction.
6615 *
6616 * This function leaves the EFLAGS.RF flag alone.
6617 *
6618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6619 * @param cbInstr The number of bytes to add.
6620 */
6621IEM_STATIC void iemRegAddToRipKeepRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6622{
6623 switch (pVCpu->iem.s.enmCpuMode)
6624 {
6625 case IEMMODE_16BIT:
6626 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6627 pVCpu->cpum.GstCtx.eip += cbInstr;
6628 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6629 break;
6630
6631 case IEMMODE_32BIT:
6632 pVCpu->cpum.GstCtx.eip += cbInstr;
6633 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6634 break;
6635
6636 case IEMMODE_64BIT:
6637 pVCpu->cpum.GstCtx.rip += cbInstr;
6638 break;
6639 default: AssertFailed();
6640 }
6641}
6642
6643
6644#if 0
6645/**
6646 * Updates the RIP/EIP/IP to point to the next instruction.
6647 *
6648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6649 */
6650IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPUCC pVCpu)
6651{
6652 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6653}
6654#endif
6655
6656
6657
6658/**
6659 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6660 *
6661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6662 * @param cbInstr The number of bytes to add.
6663 */
6664IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6665{
6666 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6667
6668 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6669#if ARCH_BITS >= 64
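    /* RIP masks indexed by IEMMODE: 16-bit and 32-bit code keep RIP within 32 bits, 64-bit uses the full register. */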
6670 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6671 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6672 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6673#else
6674 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6675 pVCpu->cpum.GstCtx.rip += cbInstr;
6676 else
6677 pVCpu->cpum.GstCtx.eip += cbInstr;
6678#endif
6679}
6680
6681
6682/**
6683 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6684 *
6685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6686 */
6687IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPUCC pVCpu)
6688{
6689 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6690}
6691
6692
6693/**
6694 * Adds to the stack pointer.
6695 *
6696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6697 * @param cbToAdd The number of bytes to add (8-bit!).
6698 */
6699DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd)
6700{
6701 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6702 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6703 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6704 pVCpu->cpum.GstCtx.esp += cbToAdd;
6705 else
6706 pVCpu->cpum.GstCtx.sp += cbToAdd;
6707}
6708
6709
6710/**
6711 * Subtracts from the stack pointer.
6712 *
6713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6714 * @param cbToSub The number of bytes to subtract (8-bit!).
6715 */
6716DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub)
6717{
6718 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6719 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6720 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6721 pVCpu->cpum.GstCtx.esp -= cbToSub;
6722 else
6723 pVCpu->cpum.GstCtx.sp -= cbToSub;
6724}
6725
6726
6727/**
6728 * Adds to the temporary stack pointer.
6729 *
6730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6731 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6732 * @param cbToAdd The number of bytes to add (16-bit).
6733 */
6734DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6735{
6736 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6737 pTmpRsp->u += cbToAdd;
6738 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6739 pTmpRsp->DWords.dw0 += cbToAdd;
6740 else
6741 pTmpRsp->Words.w0 += cbToAdd;
6742}
6743
6744
6745/**
6746 * Subtracts from the temporary stack pointer.
6747 *
6748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6749 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6750 * @param cbToSub The number of bytes to subtract.
6751 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6752 * expecting that.
6753 */
6754DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6755{
6756 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6757 pTmpRsp->u -= cbToSub;
6758 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6759 pTmpRsp->DWords.dw0 -= cbToSub;
6760 else
6761 pTmpRsp->Words.w0 -= cbToSub;
6762}
6763
6764
6765/**
6766 * Calculates the effective stack address for a push of the specified size as
6767 * well as the new RSP value (upper bits may be masked).
6768 *
6769 * @returns Effective stack address for the push.
6770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6771 * @param cbItem The size of the stack item to push.
6772 * @param puNewRsp Where to return the new RSP value.
6773 */
6774DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6775{
6776 RTUINT64U uTmpRsp;
6777 RTGCPTR GCPtrTop;
6778 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6779
6780 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6781 GCPtrTop = uTmpRsp.u -= cbItem;
6782 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6783 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6784 else
6785 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6786 *puNewRsp = uTmpRsp.u;
6787 return GCPtrTop;
6788}
6789
6790
6791/**
6792 * Gets the current stack pointer and calculates the value after a pop of the
6793 * specified size.
6794 *
6795 * @returns Current stack pointer.
6796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6797 * @param cbItem The size of the stack item to pop.
6798 * @param puNewRsp Where to return the new RSP value.
6799 */
6800DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6801{
6802 RTUINT64U uTmpRsp;
6803 RTGCPTR GCPtrTop;
6804 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6805
6806 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6807 {
6808 GCPtrTop = uTmpRsp.u;
6809 uTmpRsp.u += cbItem;
6810 }
6811 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6812 {
6813 GCPtrTop = uTmpRsp.DWords.dw0;
6814 uTmpRsp.DWords.dw0 += cbItem;
6815 }
6816 else
6817 {
6818 GCPtrTop = uTmpRsp.Words.w0;
6819 uTmpRsp.Words.w0 += cbItem;
6820 }
6821 *puNewRsp = uTmpRsp.u;
6822 return GCPtrTop;
6823}
6824
6825
6826/**
6827 * Calculates the effective stack address for a push of the specified size as
6828 * well as the new temporary RSP value (upper bits may be masked).
6829 *
6830 * @returns Effective stack address for the push.
6831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6832 * @param pTmpRsp The temporary stack pointer. This is updated.
6833 * @param cbItem The size of the stack item to push.
6834 */
6835DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6836{
6837 RTGCPTR GCPtrTop;
6838
6839 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6840 GCPtrTop = pTmpRsp->u -= cbItem;
6841 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6842 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6843 else
6844 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6845 return GCPtrTop;
6846}
6847
6848
6849/**
6850 * Gets the effective stack address for a pop of the specified size and
6851 * calculates and updates the temporary RSP.
6852 *
6853 * @returns Current stack pointer.
6854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6855 * @param pTmpRsp The temporary stack pointer. This is updated.
6856 * @param cbItem The size of the stack item to pop.
6857 */
6858DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6859{
6860 RTGCPTR GCPtrTop;
6861 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6862 {
6863 GCPtrTop = pTmpRsp->u;
6864 pTmpRsp->u += cbItem;
6865 }
6866 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6867 {
6868 GCPtrTop = pTmpRsp->DWords.dw0;
6869 pTmpRsp->DWords.dw0 += cbItem;
6870 }
6871 else
6872 {
6873 GCPtrTop = pTmpRsp->Words.w0;
6874 pTmpRsp->Words.w0 += cbItem;
6875 }
6876 return GCPtrTop;
6877}
6878
6879/** @} */
6880
6881
6882/** @name FPU access and helpers.
6883 *
6884 * @{
6885 */
6886
6887
6888/**
6889 * Hook for preparing to use the host FPU.
6890 *
6891 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6892 *
6893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6894 */
6895DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu)
6896{
6897#ifdef IN_RING3
6898 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6899#else
6900 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6901#endif
6902 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6903}
6904
6905
6906/**
6907 * Hook for preparing to use the host FPU for SSE.
6908 *
6909 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6910 *
6911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6912 */
6913DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu)
6914{
6915 iemFpuPrepareUsage(pVCpu);
6916}
6917
6918
6919/**
6920 * Hook for preparing to use the host FPU for AVX.
6921 *
6922 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6923 *
6924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6925 */
6926DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu)
6927{
6928 iemFpuPrepareUsage(pVCpu);
6929}
6930
6931
6932/**
6933 * Hook for actualizing the guest FPU state before the interpreter reads it.
6934 *
6935 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6936 *
6937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6938 */
6939DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu)
6940{
6941#ifdef IN_RING3
6942 NOREF(pVCpu);
6943#else
6944 CPUMRZFpuStateActualizeForRead(pVCpu);
6945#endif
6946 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6947}
6948
6949
6950/**
6951 * Hook for actualizing the guest FPU state before the interpreter changes it.
6952 *
6953 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6954 *
6955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6956 */
6957DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu)
6958{
6959#ifdef IN_RING3
6960 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6961#else
6962 CPUMRZFpuStateActualizeForChange(pVCpu);
6963#endif
6964 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6965}
6966
6967
6968/**
6969 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6970 * only.
6971 *
6972 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6973 *
6974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6975 */
6976DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu)
6977{
6978#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6979 NOREF(pVCpu);
6980#else
6981 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6982#endif
6983 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6984}
6985
6986
6987/**
6988 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6989 * read+write.
6990 *
6991 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6992 *
6993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6994 */
6995DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu)
6996{
6997#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6998 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6999#else
7000 CPUMRZFpuStateActualizeForChange(pVCpu);
7001#endif
7002 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7003
7004 /* Make sure any changes are loaded the next time around. */
7005 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
7006}
7007
7008
7009/**
7010 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7011 * only.
7012 *
7013 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7014 *
7015 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7016 */
7017DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu)
7018{
7019#ifdef IN_RING3
7020 NOREF(pVCpu);
7021#else
7022 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7023#endif
7024 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7025}
7026
7027
7028/**
7029 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7030 * read+write.
7031 *
7032 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7033 *
7034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7035 */
7036DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu)
7037{
7038#ifdef IN_RING3
7039 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7040#else
7041 CPUMRZFpuStateActualizeForChange(pVCpu);
7042#endif
7043 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7044
7045 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
7046 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
7047}
7048
7049
7050/**
7051 * Stores a QNaN value into a FPU register.
7052 *
7053 * @param pReg Pointer to the register.
7054 */
7055DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7056{
7057 pReg->au32[0] = UINT32_C(0x00000000);
7058 pReg->au32[1] = UINT32_C(0xc0000000);
7059 pReg->au16[4] = UINT16_C(0xffff);
7060}
7061
7062
7063/**
7064 * Updates the FOP, FPU.CS and FPUIP registers.
7065 *
7066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7067 * @param pFpuCtx The FPU context.
7068 */
7069DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx)
7070{
7071 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7072 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7073 /** @todo x87.CS and FPUIP need to be kept separately. */
7074 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7075 {
7076 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7077 * happens in real mode here based on the fnsave and fnstenv images. */
7078 pFpuCtx->CS = 0;
7079 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7080 }
7081 else
7082 {
7083 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7084 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7085 }
7086}
7087
7088
7089/**
7090 * Updates the x87.DS and FPUDP registers.
7091 *
7092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7093 * @param pFpuCtx The FPU context.
7094 * @param iEffSeg The effective segment register.
7095 * @param GCPtrEff The effective address relative to @a iEffSeg.
7096 */
7097DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7098{
7099 RTSEL sel;
7100 switch (iEffSeg)
7101 {
7102 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7103 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7104 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7105 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7106 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7107 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7108 default:
7109 AssertMsgFailed(("%d\n", iEffSeg));
7110 sel = pVCpu->cpum.GstCtx.ds.Sel;
7111 }
7112 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7113 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7114 {
7115 pFpuCtx->DS = 0;
7116 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7117 }
7118 else
7119 {
7120 pFpuCtx->DS = sel;
7121 pFpuCtx->FPUDP = GCPtrEff;
7122 }
7123}
7124
7125
7126/**
7127 * Rotates the stack registers in the push direction.
7128 *
7129 * @param pFpuCtx The FPU context.
7130 * @remarks This is a complete waste of time, but fxsave stores the registers in
7131 * stack order.
7132 */
7133DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7134{
7135 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7136 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7137 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7138 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7139 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7140 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7141 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7142 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7143 pFpuCtx->aRegs[0].r80 = r80Tmp;
7144}
7145
7146
7147/**
7148 * Rotates the stack registers in the pop direction.
7149 *
7150 * @param pFpuCtx The FPU context.
7151 * @remarks This is a complete waste of time, but fxsave stores the registers in
7152 * stack order.
7153 */
7154DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7155{
7156 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7157 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7158 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7159 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7160 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7161 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7162 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7163 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7164 pFpuCtx->aRegs[7].r80 = r80Tmp;
7165}
7166
7167
7168/**
7169 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7170 * exception prevents it.
7171 *
7172 * @param pResult The FPU operation result to push.
7173 * @param pFpuCtx The FPU context.
7174 */
7175IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7176{
7177 /* Update FSW and bail if there are pending exceptions afterwards. */
7178 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7179 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7180 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7181 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7182 {
7183 pFpuCtx->FSW = fFsw;
7184 return;
7185 }
7186
7187 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7188 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7189 {
7190 /* All is fine, push the actual value. */
7191 pFpuCtx->FTW |= RT_BIT(iNewTop);
7192 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7193 }
7194 else if (pFpuCtx->FCW & X86_FCW_IM)
7195 {
7196 /* Masked stack overflow, push QNaN. */
7197 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7198 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7199 }
7200 else
7201 {
7202 /* Raise stack overflow, don't push anything. */
7203 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7204 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7205 return;
7206 }
7207
7208 fFsw &= ~X86_FSW_TOP_MASK;
7209 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7210 pFpuCtx->FSW = fFsw;
7211
7212 iemFpuRotateStackPush(pFpuCtx);
7213}
7214
7215
7216/**
7217 * Stores a result in a FPU register and updates the FSW and FTW.
7218 *
7219 * @param pFpuCtx The FPU context.
7220 * @param pResult The result to store.
7221 * @param iStReg Which FPU register to store it in.
7222 */
7223IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7224{
7225 Assert(iStReg < 8);
7226 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7227 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7228 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7229 pFpuCtx->FTW |= RT_BIT(iReg);
7230 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7231}
7232
7233
7234/**
7235 * Only updates the FPU status word (FSW) with the result of the current
7236 * instruction.
7237 *
7238 * @param pFpuCtx The FPU context.
7239 * @param u16FSW The FSW output of the current instruction.
7240 */
7241IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7242{
7243 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7244 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7245}
7246
7247
7248/**
7249 * Pops one item off the FPU stack if no pending exception prevents it.
7250 *
7251 * @param pFpuCtx The FPU context.
7252 */
7253IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7254{
7255 /* Check pending exceptions. */
7256 uint16_t uFSW = pFpuCtx->FSW;
7257 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7258 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7259 return;
7260
7261 /* TOP--. */
7262 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7263 uFSW &= ~X86_FSW_TOP_MASK;
7264 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7265 pFpuCtx->FSW = uFSW;
7266
7267 /* Mark the previous ST0 as empty. */
7268 iOldTop >>= X86_FSW_TOP_SHIFT;
7269 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7270
7271 /* Rotate the registers. */
7272 iemFpuRotateStackPop(pFpuCtx);
7273}
7274
7275
7276/**
7277 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7278 *
7279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7280 * @param pResult The FPU operation result to push.
7281 */
7282IEM_STATIC void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult)
7283{
7284 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7285 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7286 iemFpuMaybePushResult(pResult, pFpuCtx);
7287}
7288
7289
7290/**
7291 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7292 * and sets FPUDP and FPUDS.
7293 *
7294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7295 * @param pResult The FPU operation result to push.
7296 * @param iEffSeg The effective segment register.
7297 * @param GCPtrEff The effective address relative to @a iEffSeg.
7298 */
7299IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7300{
7301 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7302 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7303 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7304 iemFpuMaybePushResult(pResult, pFpuCtx);
7305}
7306
7307
7308/**
7309 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7310 * unless a pending exception prevents it.
7311 *
7312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7313 * @param pResult The FPU operation result to store and push.
7314 */
7315IEM_STATIC void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult)
7316{
7317 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7318 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7319
7320 /* Update FSW and bail if there are pending exceptions afterwards. */
7321 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7322 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7323 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7324 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7325 {
7326 pFpuCtx->FSW = fFsw;
7327 return;
7328 }
7329
7330 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7331 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7332 {
7333 /* All is fine, push the actual value. */
7334 pFpuCtx->FTW |= RT_BIT(iNewTop);
7335 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7336 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7337 }
7338 else if (pFpuCtx->FCW & X86_FCW_IM)
7339 {
7340 /* Masked stack overflow, push QNaN. */
7341 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7342 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7343 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7344 }
7345 else
7346 {
7347 /* Raise stack overflow, don't push anything. */
7348 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7349 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7350 return;
7351 }
7352
7353 fFsw &= ~X86_FSW_TOP_MASK;
7354 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7355 pFpuCtx->FSW = fFsw;
7356
7357 iemFpuRotateStackPush(pFpuCtx);
7358}
7359
7360
7361/**
7362 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7363 * FOP.
7364 *
7365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7366 * @param pResult The result to store.
7367 * @param iStReg Which FPU register to store it in.
7368 */
7369IEM_STATIC void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7370{
7371 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7372 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7373 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7374}
7375
7376
7377/**
7378 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7379 * FOP, and then pops the stack.
7380 *
7381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7382 * @param pResult The result to store.
7383 * @param iStReg Which FPU register to store it in.
7384 */
7385IEM_STATIC void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7386{
7387 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7388 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7389 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7390 iemFpuMaybePopOne(pFpuCtx);
7391}
7392
7393
7394/**
7395 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7396 * FPUDP, and FPUDS.
7397 *
7398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7399 * @param pResult The result to store.
7400 * @param iStReg Which FPU register to store it in.
7401 * @param iEffSeg The effective memory operand selector register.
7402 * @param GCPtrEff The effective memory operand offset.
7403 */
7404IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7405 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7406{
7407 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7408 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7409 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7410 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7411}
7412
7413
7414/**
7415 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7416 * FPUDP, and FPUDS, and then pops the stack.
7417 *
7418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7419 * @param pResult The result to store.
7420 * @param iStReg Which FPU register to store it in.
7421 * @param iEffSeg The effective memory operand selector register.
7422 * @param GCPtrEff The effective memory operand offset.
7423 */
7424IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
7425 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7426{
7427 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7428 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7429 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7430 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7431 iemFpuMaybePopOne(pFpuCtx);
7432}
7433
7434
7435/**
7436 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7437 *
7438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7439 */
7440IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu)
7441{
7442 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7443 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7444}
7445
7446
7447/**
7448 * Marks the specified stack register as free (for FFREE).
7449 *
7450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7451 * @param iStReg The register to free.
7452 */
7453IEM_STATIC void iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg)
7454{
7455 Assert(iStReg < 8);
7456 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7457 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7458 pFpuCtx->FTW &= ~RT_BIT(iReg);
7459}
7460
7461
7462/**
7463 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7464 *
7465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7466 */
7467IEM_STATIC void iemFpuStackIncTop(PVMCPUCC pVCpu)
7468{
7469 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7470 uint16_t uFsw = pFpuCtx->FSW;
7471 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7472 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7473 uFsw &= ~X86_FSW_TOP_MASK;
7474 uFsw |= uTop;
7475 pFpuCtx->FSW = uFsw;
7476}
7477
7478
7479/**
7480 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7481 *
7482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7483 */
7484IEM_STATIC void iemFpuStackDecTop(PVMCPUCC pVCpu)
7485{
7486 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7487 uint16_t uFsw = pFpuCtx->FSW;
7488 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7489 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7490 uFsw &= ~X86_FSW_TOP_MASK;
7491 uFsw |= uTop;
7492 pFpuCtx->FSW = uFsw;
7493}
7494
7495
7496/**
7497 * Updates the FSW, FOP, FPUIP, and FPUCS.
7498 *
7499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7500 * @param u16FSW The FSW from the current instruction.
7501 */
7502IEM_STATIC void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW)
7503{
7504 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7505 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7506 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7507}
7508
7509
7510/**
7511 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7512 *
7513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7514 * @param u16FSW The FSW from the current instruction.
7515 */
7516IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7517{
7518 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7519 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7520 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7521 iemFpuMaybePopOne(pFpuCtx);
7522}
7523
7524
7525/**
7526 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7527 *
7528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7529 * @param u16FSW The FSW from the current instruction.
7530 * @param iEffSeg The effective memory operand selector register.
7531 * @param GCPtrEff The effective memory operand offset.
7532 */
7533IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7534{
7535 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7536 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7537 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7538 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7539}
7540
7541
7542/**
7543 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7544 *
7545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7546 * @param u16FSW The FSW from the current instruction.
7547 */
7548IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7549{
7550 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7551 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7552 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7553 iemFpuMaybePopOne(pFpuCtx);
7554 iemFpuMaybePopOne(pFpuCtx);
7555}
7556
7557
7558/**
7559 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7560 *
7561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7562 * @param u16FSW The FSW from the current instruction.
7563 * @param iEffSeg The effective memory operand selector register.
7564 * @param GCPtrEff The effective memory operand offset.
7565 */
7566IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7567{
7568 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7569 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7570 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7571 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7572 iemFpuMaybePopOne(pFpuCtx);
7573}
7574
7575
7576/**
7577 * Worker routine for raising an FPU stack underflow exception.
7578 *
7579 * @param pFpuCtx The FPU context.
7580 * @param iStReg The stack register being accessed.
7581 */
7582IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7583{
7584 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7585 if (pFpuCtx->FCW & X86_FCW_IM)
7586 {
7587 /* Masked underflow. */
7588 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7589 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7590 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7591 if (iStReg != UINT8_MAX)
7592 {
7593 pFpuCtx->FTW |= RT_BIT(iReg);
7594 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7595 }
7596 }
7597 else
7598 {
7599 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7600 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7601 }
7602}
7603
7604
7605/**
7606 * Raises a FPU stack underflow exception.
7607 *
7608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7609 * @param iStReg The destination register that should be loaded
7610 * with QNaN if \#IS is masked. Specify
7611 * UINT8_MAX if none (like for fcom).
7612 */
7613DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg)
7614{
7615 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7616 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7617 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7618}
7619
7620
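/**
 * Raises a FPU stack underflow exception and updates FPUDP and FPUDS.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The destination register to load with QNaN if \#IS is masked (UINT8_MAX if none).
 * @param iEffSeg The effective memory operand selector register.
 * @param GCPtrEff The effective memory operand offset.
 */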
7621DECL_NO_INLINE(IEM_STATIC, void)
7622iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7623{
7624 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7625 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7626 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7627 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7628}
7629
7630
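/**
 * Raises a FPU stack underflow exception and pops the stack once.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The destination register to load with QNaN if \#IS is masked (UINT8_MAX if none).
 */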
7631DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg)
7632{
7633 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7634 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7635 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7636 iemFpuMaybePopOne(pFpuCtx);
7637}
7638
7639
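/**
 * Raises a FPU stack underflow exception, updates FPUDP and FPUDS, and pops the stack once.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 * @param iStReg The destination register to load with QNaN if \#IS is masked (UINT8_MAX if none).
 * @param iEffSeg The effective memory operand selector register.
 * @param GCPtrEff The effective memory operand offset.
 */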
7640DECL_NO_INLINE(IEM_STATIC, void)
7641iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7642{
7643 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7644 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7645 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7646 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7647 iemFpuMaybePopOne(pFpuCtx);
7648}
7649
7650
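/**
 * Raises a FPU stack underflow exception and pops the stack twice.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 */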
7651DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu)
7652{
7653 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7654 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7655 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7656 iemFpuMaybePopOne(pFpuCtx);
7657 iemFpuMaybePopOne(pFpuCtx);
7658}
7659
7660
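/**
 * Raises a FPU stack underflow exception for an instruction pushing a result.
 *
 * A QNaN is pushed when \#IS is masked; otherwise TOP and the register stack
 * are left unchanged.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 */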
7661DECL_NO_INLINE(IEM_STATIC, void)
7662iemFpuStackPushUnderflow(PVMCPUCC pVCpu)
7663{
7664 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7665 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7666
7667 if (pFpuCtx->FCW & X86_FCW_IM)
7668 {
7669 /* Masked underflow - Push QNaN. */
7670 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7671 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7672 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7673 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7674 pFpuCtx->FTW |= RT_BIT(iNewTop);
7675 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7676 iemFpuRotateStackPush(pFpuCtx);
7677 }
7678 else
7679 {
7680 /* Exception pending - don't change TOP or the register stack. */
7681 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7682 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7683 }
7684}
7685
7686
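/**
 * Raises a FPU stack underflow exception for an instruction that replaces ST0
 * and pushes a second value.
 *
 * When \#IS is masked, ST0 is set to QNaN and another QNaN is pushed; otherwise
 * TOP and the register stack are left unchanged.
 *
 * @param pVCpu The cross context virtual CPU structure of the calling thread.
 */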
7687DECL_NO_INLINE(IEM_STATIC, void)
7688iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu)
7689{
7690 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7691 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7692
7693 if (pFpuCtx->FCW & X86_FCW_IM)
7694 {
7695 /* Masked underflow - Push QNaN. */
7696 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7697 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7698 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7699 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7700 pFpuCtx->FTW |= RT_BIT(iNewTop);
7701 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7702 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7703 iemFpuRotateStackPush(pFpuCtx);
7704 }
7705 else
7706 {
7707 /* Exception pending - don't change TOP or the register stack. */
7708 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7709 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7710 }
7711}
7712
7713
7714/**
7715 * Worker routine for raising an FPU stack overflow exception on a push.
7716 *
7717 * @param pFpuCtx The FPU context.
7718 */
7719IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7720{
7721 if (pFpuCtx->FCW & X86_FCW_IM)
7722 {
7723 /* Masked overflow. */
7724 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7725 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7726 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7727 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7728 pFpuCtx->FTW |= RT_BIT(iNewTop);
7729 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7730 iemFpuRotateStackPush(pFpuCtx);
7731 }
7732 else
7733 {
7734 /* Exception pending - don't change TOP or the register stack. */
7735 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7736 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7737 }
7738}
7739
7740
7741/**
7742 * Raises an FPU stack overflow exception on a push.
7743 *
7744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7745 */
7746DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPUCC pVCpu)
7747{
7748 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7749 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7750 iemFpuStackPushOverflowOnly(pFpuCtx);
7751}
7752
7753
7754/**
7755 * Raises an FPU stack overflow exception on a push with a memory operand.
7756 *
7757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7758 * @param iEffSeg The effective memory operand selector register.
7759 * @param GCPtrEff The effective memory operand offset.
7760 */
7761DECL_NO_INLINE(IEM_STATIC, void)
7762iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7763{
7764 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7765 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7766 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7767 iemFpuStackPushOverflowOnly(pFpuCtx);
7768}
7769
7770
7771IEM_STATIC int iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg)
7772{
7773 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7774 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7775 if (pFpuCtx->FTW & RT_BIT(iReg))
7776 return VINF_SUCCESS;
7777 return VERR_NOT_FOUND;
7778}
7779
7780
7781IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7782{
7783 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7784 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7785 if (pFpuCtx->FTW & RT_BIT(iReg))
7786 {
7787 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7788 return VINF_SUCCESS;
7789 }
7790 return VERR_NOT_FOUND;
7791}
7792
7793
7794IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7795 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7796{
7797 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7798 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7799 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7800 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7801 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7802 {
7803 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7804 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7805 return VINF_SUCCESS;
7806 }
7807 return VERR_NOT_FOUND;
7808}
7809
7810
7811IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7812{
7813 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7814 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7815 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7816 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7817 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7818 {
7819 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7820 return VINF_SUCCESS;
7821 }
7822 return VERR_NOT_FOUND;
7823}
7824
7825
7826/**
7827 * Updates the FPU exception status after FCW is changed.
7828 *
7829 * @param pFpuCtx The FPU context.
7830 */
7831IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7832{
7833 uint16_t u16Fsw = pFpuCtx->FSW;
7834 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7835 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7836 else
7837 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7838 pFpuCtx->FSW = u16Fsw;
7839}
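/*
 * Illustrative sketch (editor's addition; the context variables are assumed to
 * be set up as elsewhere in this file): how the ES and B summary bits react to
 * an FCW change, e.g. when emulating FLDCW.  With an invalid-operation
 * exception pending and no other exception bits set, unmasking it raises the
 * summary bits and masking it again clears them.
 *
 *     PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
 *     pFpuCtx->FSW |= X86_FSW_IE;              // pending invalid-operation
 *     pFpuCtx->FCW &= ~X86_FCW_IM;             // guest unmasks it
 *     iemFpuRecalcExceptionStatus(pFpuCtx);    // FSW.ES and FSW.B become set
 *     pFpuCtx->FCW |= X86_FCW_IM;              // guest masks it again
 *     iemFpuRecalcExceptionStatus(pFpuCtx);    // FSW.ES and FSW.B are cleared
 */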
7840
7841
7842/**
7843 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7844 *
7845 * @returns The full FTW.
7846 * @param pFpuCtx The FPU context.
7847 */
7848IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7849{
7850 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7851 uint16_t u16Ftw = 0;
7852 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7853 for (unsigned iSt = 0; iSt < 8; iSt++)
7854 {
7855 unsigned const iReg = (iSt + iTop) & 7;
7856 if (!(u8Ftw & RT_BIT(iReg)))
7857 u16Ftw |= 3 << (iReg * 2); /* empty */
7858 else
7859 {
7860 uint16_t uTag;
7861 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7862 if (pr80Reg->s.uExponent == 0x7fff)
7863 uTag = 2; /* Exponent is all 1's => Special. */
7864 else if (pr80Reg->s.uExponent == 0x0000)
7865 {
7866 if (pr80Reg->s.u64Mantissa == 0x0000)
7867 uTag = 1; /* All bits are zero => Zero. */
7868 else
7869 uTag = 2; /* Must be special. */
7870 }
7871 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7872 uTag = 0; /* Valid. */
7873 else
7874 uTag = 2; /* Must be special. */
7875
7876            u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7877 }
7878 }
7879
7880 return u16Ftw;
7881}
7882
7883
7884/**
7885 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7886 *
7887 * @returns The compressed FTW.
7888 * @param u16FullFtw The full FTW to convert.
7889 */
7890IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7891{
7892 uint8_t u8Ftw = 0;
7893 for (unsigned i = 0; i < 8; i++)
7894 {
7895 if ((u16FullFtw & 3) != 3 /*empty*/)
7896 u8Ftw |= RT_BIT(i);
7897 u16FullFtw >>= 2;
7898 }
7899
7900 return u8Ftw;
7901}
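/*
 * Worked example (editor's addition): the compressed FTW keeps one bit per
 * register (1 = not empty) while the full FTW uses two bits (00 = valid,
 * 01 = zero, 10 = special, 11 = empty).  A full tag word of 0xfffc - register
 * 0 valid, registers 1 thru 7 empty - therefore compresses to 0x01, and
 * iemFpuCalcFullFtw rebuilds the two-bit fields from the compressed FTW in the
 * context together with the actual register contents.
 *
 *     Assert(iemFpuCompressFtw(UINT16_C(0xfffc)) == 0x01);
 *     Assert(iemFpuCompressFtw(UINT16_C(0xffff)) == 0x00);
 */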
7902
7903/** @} */
7904
7905
7906/** @name Memory access.
7907 *
7908 * @{
7909 */
7910
7911
7912/**
7913 * Updates the IEMCPU::cbWritten counter if applicable.
7914 *
7915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7916 * @param fAccess The access being accounted for.
7917 * @param cbMem The access size.
7918 */
7919DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
7920{
7921 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7922 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7923 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7924}
7925
7926
7927/**
7928 * Checks if the given segment can be written to, raising the appropriate
7929 * exception if not.
7930 *
7931 * @returns VBox strict status code.
7932 *
7933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7934 * @param pHid Pointer to the hidden register.
7935 * @param iSegReg The register number.
7936 * @param pu64BaseAddr Where to return the base address to use for the
7937 * segment. (In 64-bit code it may differ from the
7938 * base in the hidden segment.)
7939 */
7940IEM_STATIC VBOXSTRICTRC
7941iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7942{
7943 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7944
7945 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7946 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7947 else
7948 {
7949 if (!pHid->Attr.n.u1Present)
7950 {
7951 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7952 AssertRelease(uSel == 0);
7953 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7954 return iemRaiseGeneralProtectionFault0(pVCpu);
7955 }
7956
7957 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7958 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7959 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7960 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7961 *pu64BaseAddr = pHid->u64Base;
7962 }
7963 return VINF_SUCCESS;
7964}
7965
7966
7967/**
7968 * Checks if the given segment can be read from, raising the appropriate
7969 * exception if not.
7970 *
7971 * @returns VBox strict status code.
7972 *
7973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7974 * @param pHid Pointer to the hidden register.
7975 * @param iSegReg The register number.
7976 * @param pu64BaseAddr Where to return the base address to use for the
7977 * segment. (In 64-bit code it may differ from the
7978 * base in the hidden segment.)
7979 */
7980IEM_STATIC VBOXSTRICTRC
7981iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7982{
7983 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7984
7985 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7986 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7987 else
7988 {
7989 if (!pHid->Attr.n.u1Present)
7990 {
7991 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7992 AssertRelease(uSel == 0);
7993 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7994 return iemRaiseGeneralProtectionFault0(pVCpu);
7995 }
7996
7997 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7998 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7999 *pu64BaseAddr = pHid->u64Base;
8000 }
8001 return VINF_SUCCESS;
8002}
8003
8004
8005/**
8006 * Applies the segment limit, base and attributes.
8007 *
8008 * This may raise a \#GP or \#SS.
8009 *
8010 * @returns VBox strict status code.
8011 *
8012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8013 * @param fAccess The kind of access which is being performed.
8014 * @param iSegReg The index of the segment register to apply.
8015 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8016 * TSS, ++).
8017 * @param cbMem The access size.
8018 * @param pGCPtrMem Pointer to the guest memory address to apply
8019 * segmentation to. Input and output parameter.
8020 */
8021IEM_STATIC VBOXSTRICTRC
8022iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8023{
8024 if (iSegReg == UINT8_MAX)
8025 return VINF_SUCCESS;
8026
8027 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8028 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8029 switch (pVCpu->iem.s.enmCpuMode)
8030 {
8031 case IEMMODE_16BIT:
8032 case IEMMODE_32BIT:
8033 {
8034 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8035 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8036
8037 if ( pSel->Attr.n.u1Present
8038 && !pSel->Attr.n.u1Unusable)
8039 {
8040 Assert(pSel->Attr.n.u1DescType);
8041 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8042 {
8043 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8044 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8045 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8046
8047 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8048 {
8049 /** @todo CPL check. */
8050 }
8051
8052 /*
8053 * There are two kinds of data selectors, normal and expand down.
8054 */
8055 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8056 {
8057 if ( GCPtrFirst32 > pSel->u32Limit
8058 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8059 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8060 }
8061 else
8062 {
8063 /*
8064 * The upper boundary is defined by the B bit, not the G bit!
8065 */
8066 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8067 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8068 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8069 }
8070 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8071 }
8072 else
8073 {
8074
8075 /*
8076                 * A code selector can usually be used for reading thru; writing
8077                 * is only permitted in real and V8086 mode.
8078 */
8079 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8080 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8081 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8082 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8083 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8084
8085 if ( GCPtrFirst32 > pSel->u32Limit
8086 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8087 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8088
8089 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8090 {
8091 /** @todo CPL check. */
8092 }
8093
8094 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8095 }
8096 }
8097 else
8098 return iemRaiseGeneralProtectionFault0(pVCpu);
8099 return VINF_SUCCESS;
8100 }
8101
8102 case IEMMODE_64BIT:
8103 {
8104 RTGCPTR GCPtrMem = *pGCPtrMem;
8105 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8106 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8107
8108 Assert(cbMem >= 1);
8109 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8110 return VINF_SUCCESS;
8111 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8112 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8113 return iemRaiseGeneralProtectionFault0(pVCpu);
8114 }
8115
8116 default:
8117 AssertFailedReturn(VERR_IEM_IPE_7);
8118 }
8119}
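/*
 * Worked example (editor's addition, hypothetical values): for an expand-down
 * data segment with u32Limit = 0x0fff and the B bit (u1DefBig) set, the valid
 * offset range is 0x1000 thru 0xffffffff.  A 4 byte read at offset 0x0800 thus
 * ends up in iemRaiseSelectorBounds, while the same read at 0x2000 passes and
 * merely gets the segment base added:
 *
 *     RTGCPTR GCPtrMem = UINT32_C(0x2000);
 *     rcStrict = iemMemApplySegment(pVCpu, IEM_ACCESS_DATA_R, X86_SREG_DS, 4, &GCPtrMem);
 *     // VINF_SUCCESS; GCPtrMem now holds 0x2000 + DS base
 */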
8120
8121
8122/**
8123 * Translates a virtual address to a physical address and checks if we
8124 * can access the page as specified.
8125 *
8126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8127 * @param GCPtrMem The virtual address.
8128 * @param fAccess The intended access.
8129 * @param pGCPhysMem Where to return the physical address.
8130 */
8131IEM_STATIC VBOXSTRICTRC
8132iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8133{
8134 /** @todo Need a different PGM interface here. We're currently using
8135     *        generic / REM interfaces.  This won't cut it for R0. */
8136 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8137 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
8138 * here. */
8139 RTGCPHYS GCPhys;
8140 uint64_t fFlags;
8141 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8142 if (RT_FAILURE(rc))
8143 {
8144 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8145 /** @todo Check unassigned memory in unpaged mode. */
8146 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8147 *pGCPhysMem = NIL_RTGCPHYS;
8148 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8149 }
8150
8151 /* If the page is writable and does not have the no-exec bit set, all
8152 access is allowed. Otherwise we'll have to check more carefully... */
8153 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8154 {
8155 /* Write to read only memory? */
8156 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8157 && !(fFlags & X86_PTE_RW)
8158 && ( ( pVCpu->iem.s.uCpl == 3
8159 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8160 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8161 {
8162 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8163 *pGCPhysMem = NIL_RTGCPHYS;
8164 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8165 }
8166
8167 /* Kernel memory accessed by userland? */
8168 if ( !(fFlags & X86_PTE_US)
8169 && pVCpu->iem.s.uCpl == 3
8170 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8171 {
8172 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8173 *pGCPhysMem = NIL_RTGCPHYS;
8174 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8175 }
8176
8177 /* Executing non-executable memory? */
8178 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8179 && (fFlags & X86_PTE_PAE_NX)
8180 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8181 {
8182 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8183 *pGCPhysMem = NIL_RTGCPHYS;
8184 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8185 VERR_ACCESS_DENIED);
8186 }
8187 }
8188
8189 /*
8190 * Set the dirty / access flags.
8191     * ASSUMES this is set when the address is translated rather than on commit...
8192 */
8193 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8194 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8195 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8196 {
8197 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8198 AssertRC(rc2);
8199 }
8200
8201 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8202 *pGCPhysMem = GCPhys;
8203 return VINF_SUCCESS;
8204}
8205
8206
8207
8208/**
8209 * Maps a physical page.
8210 *
8211 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8212 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8213 * @param GCPhysMem The physical address.
8214 * @param fAccess The intended access.
8215 * @param ppvMem Where to return the mapping address.
8216 * @param pLock The PGM lock.
8217 */
8218IEM_STATIC int iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8219{
8220#ifdef IEM_LOG_MEMORY_WRITES
8221 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8222 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8223#endif
8224
8225    /** @todo This API may require some improvement later.  A private deal with PGM
8226     *        regarding locking and unlocking needs to be struck.  A couple of TLBs
8227 * living in PGM, but with publicly accessible inlined access methods
8228 * could perhaps be an even better solution. */
8229 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8230 GCPhysMem,
8231 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8232 pVCpu->iem.s.fBypassHandlers,
8233 ppvMem,
8234 pLock);
8235 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8236 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8237
8238 return rc;
8239}
8240
8241
8242/**
8243 * Unmaps a page previously mapped by iemMemPageMap.
8244 *
8245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8246 * @param GCPhysMem The physical address.
8247 * @param fAccess The intended access.
8248 * @param pvMem What iemMemPageMap returned.
8249 * @param pLock The PGM lock.
8250 */
8251DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8252{
8253 NOREF(pVCpu);
8254 NOREF(GCPhysMem);
8255 NOREF(fAccess);
8256 NOREF(pvMem);
8257 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8258}
8259
8260
8261/**
8262 * Looks up a memory mapping entry.
8263 *
8264 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8265 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8266 * @param pvMem The memory address.
8267 * @param   fAccess             The access type to look up.
8268 */
8269DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8270{
8271 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8272 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8273 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8274 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8275 return 0;
8276 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8277 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8278 return 1;
8279 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8280 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8281 return 2;
8282 return VERR_NOT_FOUND;
8283}
8284
8285
8286/**
8287 * Finds a free memmap entry when using iNextMapping doesn't work.
8288 *
8289 * @returns Memory mapping index, 1024 on failure.
8290 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8291 */
8292IEM_STATIC unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
8293{
8294 /*
8295 * The easy case.
8296 */
8297 if (pVCpu->iem.s.cActiveMappings == 0)
8298 {
8299 pVCpu->iem.s.iNextMapping = 1;
8300 return 0;
8301 }
8302
8303 /* There should be enough mappings for all instructions. */
8304 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8305
8306 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8307 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8308 return i;
8309
8310 AssertFailedReturn(1024);
8311}
8312
8313
8314/**
8315 * Commits a bounce buffer that needs writing back and unmaps it.
8316 *
8317 * @returns Strict VBox status code.
8318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8319 * @param iMemMap The index of the buffer to commit.
8320 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8321 * Always false in ring-3, obviously.
8322 */
8323IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
8324{
8325 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8326 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8327#ifdef IN_RING3
8328 Assert(!fPostponeFail);
8329 RT_NOREF_PV(fPostponeFail);
8330#endif
8331
8332 /*
8333 * Do the writing.
8334 */
8335 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8336 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8337 {
8338 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8339 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8340 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8341 if (!pVCpu->iem.s.fBypassHandlers)
8342 {
8343 /*
8344 * Carefully and efficiently dealing with access handler return
8345             * codes makes this a little bloated.
8346 */
8347 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8348 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8349 pbBuf,
8350 cbFirst,
8351 PGMACCESSORIGIN_IEM);
8352 if (rcStrict == VINF_SUCCESS)
8353 {
8354 if (cbSecond)
8355 {
8356 rcStrict = PGMPhysWrite(pVM,
8357 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8358 pbBuf + cbFirst,
8359 cbSecond,
8360 PGMACCESSORIGIN_IEM);
8361 if (rcStrict == VINF_SUCCESS)
8362 { /* nothing */ }
8363 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8364 {
8365 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8366 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8367 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8368 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8369 }
8370#ifndef IN_RING3
8371 else if (fPostponeFail)
8372 {
8373 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8374 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8375 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8376 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8377 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8378 return iemSetPassUpStatus(pVCpu, rcStrict);
8379 }
8380#endif
8381 else
8382 {
8383 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8384 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8385 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8386 return rcStrict;
8387 }
8388 }
8389 }
8390 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8391 {
8392 if (!cbSecond)
8393 {
8394 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8395 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8396 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8397 }
8398 else
8399 {
8400 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8401 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8402 pbBuf + cbFirst,
8403 cbSecond,
8404 PGMACCESSORIGIN_IEM);
8405 if (rcStrict2 == VINF_SUCCESS)
8406 {
8407 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8408 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8409 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8410 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8411 }
8412 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8413 {
8414 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8415 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8416 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8417 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8418 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8419 }
8420#ifndef IN_RING3
8421 else if (fPostponeFail)
8422 {
8423 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8425 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8426 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8427 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8428 return iemSetPassUpStatus(pVCpu, rcStrict);
8429 }
8430#endif
8431 else
8432 {
8433 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8435 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8436 return rcStrict2;
8437 }
8438 }
8439 }
8440#ifndef IN_RING3
8441 else if (fPostponeFail)
8442 {
8443 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8444 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8445 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8446 if (!cbSecond)
8447 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8448 else
8449 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8450 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8451 return iemSetPassUpStatus(pVCpu, rcStrict);
8452 }
8453#endif
8454 else
8455 {
8456 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8457 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8458 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8459 return rcStrict;
8460 }
8461 }
8462 else
8463 {
8464 /*
8465 * No access handlers, much simpler.
8466 */
8467 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8468 if (RT_SUCCESS(rc))
8469 {
8470 if (cbSecond)
8471 {
8472 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8473 if (RT_SUCCESS(rc))
8474 { /* likely */ }
8475 else
8476 {
8477 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8479 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8480 return rc;
8481 }
8482 }
8483 }
8484 else
8485 {
8486 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8487 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8488 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8489 return rc;
8490 }
8491 }
8492 }
8493
8494#if defined(IEM_LOG_MEMORY_WRITES)
8495 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8496 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8497 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8498 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8499 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8500 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8501
8502 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8503 g_cbIemWrote = cbWrote;
8504 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8505#endif
8506
8507 /*
8508 * Free the mapping entry.
8509 */
8510 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8511 Assert(pVCpu->iem.s.cActiveMappings != 0);
8512 pVCpu->iem.s.cActiveMappings--;
8513 return VINF_SUCCESS;
8514}
8515
8516
8517/**
8518 * iemMemMap worker that deals with a request crossing pages.
8519 */
8520IEM_STATIC VBOXSTRICTRC
8521iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8522{
8523 /*
8524 * Do the address translations.
8525 */
8526 RTGCPHYS GCPhysFirst;
8527 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8528 if (rcStrict != VINF_SUCCESS)
8529 return rcStrict;
8530
8531 RTGCPHYS GCPhysSecond;
8532 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8533 fAccess, &GCPhysSecond);
8534 if (rcStrict != VINF_SUCCESS)
8535 return rcStrict;
8536 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8537
8538 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8539
8540 /*
8541 * Read in the current memory content if it's a read, execute or partial
8542 * write access.
8543 */
8544 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8545 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8546 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8547
8548 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8549 {
8550 if (!pVCpu->iem.s.fBypassHandlers)
8551 {
8552 /*
8553 * Must carefully deal with access handler status codes here,
8554             * which makes the code a bit bloated.
8555 */
8556 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8557 if (rcStrict == VINF_SUCCESS)
8558 {
8559 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8560 if (rcStrict == VINF_SUCCESS)
8561 { /*likely */ }
8562 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8563 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8564 else
8565 {
8566 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8567 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8568 return rcStrict;
8569 }
8570 }
8571 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8572 {
8573 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8574 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8575 {
8576 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8577 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8578 }
8579 else
8580 {
8581 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8582                          GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8583 return rcStrict2;
8584 }
8585 }
8586 else
8587 {
8588 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8589 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8590 return rcStrict;
8591 }
8592 }
8593 else
8594 {
8595 /*
8596             * No informational status codes here, much more straightforward.
8597 */
8598 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8599 if (RT_SUCCESS(rc))
8600 {
8601 Assert(rc == VINF_SUCCESS);
8602 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8603 if (RT_SUCCESS(rc))
8604 Assert(rc == VINF_SUCCESS);
8605 else
8606 {
8607 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8608 return rc;
8609 }
8610 }
8611 else
8612 {
8613 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8614 return rc;
8615 }
8616 }
8617 }
8618#ifdef VBOX_STRICT
8619 else
8620 memset(pbBuf, 0xcc, cbMem);
8621 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8622 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8623#endif
8624
8625 /*
8626 * Commit the bounce buffer entry.
8627 */
8628 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8629 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8630 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8631 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8632 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8633 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8634 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8635 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8636 pVCpu->iem.s.cActiveMappings++;
8637
8638 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8639 *ppvMem = pbBuf;
8640 return VINF_SUCCESS;
8641}
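/*
 * Worked example (editor's addition): a 4 byte access at an offset of 0xffe
 * into a page is split as cbFirstPage = PAGE_SIZE - (GCPhysFirst &
 * PAGE_OFFSET_MASK) = 2 and cbSecondPage = cbMem - cbFirstPage = 2, with
 * GCPhysSecond obtained by translating the last byte and masking off the page
 * offset.  The caller never sees the split; it just gets a pointer into
 * aBounceBuffers[iMemMap], and the commit path writes the two halves back with
 * separate PGMPhysWrite / PGMPhysSimpleWriteGCPhys calls.
 *
 *     cbFirstPage  = PAGE_SIZE - (UINT32_C(0xffe) & PAGE_OFFSET_MASK);   // 2 on 4K pages
 *     cbSecondPage = 4 - cbFirstPage;                                    // 2
 */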
8642
8643
8644/**
8645 * iemMemMap worker that deals with iemMemPageMap failures.
8646 */
8647IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8648 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8649{
8650 /*
8651     * Filter out the conditions we cannot handle and the ones which shouldn't happen.
8652 */
8653 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8654 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8655 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8656 {
8657 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8658 return rcMap;
8659 }
8660 pVCpu->iem.s.cPotentialExits++;
8661
8662 /*
8663 * Read in the current memory content if it's a read, execute or partial
8664 * write access.
8665 */
8666 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8667 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8668 {
8669 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8670 memset(pbBuf, 0xff, cbMem);
8671 else
8672 {
8673 int rc;
8674 if (!pVCpu->iem.s.fBypassHandlers)
8675 {
8676 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8677 if (rcStrict == VINF_SUCCESS)
8678 { /* nothing */ }
8679 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8680 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8681 else
8682 {
8683 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8684 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8685 return rcStrict;
8686 }
8687 }
8688 else
8689 {
8690 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8691 if (RT_SUCCESS(rc))
8692 { /* likely */ }
8693 else
8694 {
8695 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8696 GCPhysFirst, rc));
8697 return rc;
8698 }
8699 }
8700 }
8701 }
8702#ifdef VBOX_STRICT
8703 else
8704 memset(pbBuf, 0xcc, cbMem);
8705#endif
8706#ifdef VBOX_STRICT
8707 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8708 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8709#endif
8710
8711 /*
8712 * Commit the bounce buffer entry.
8713 */
8714 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8715 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8716 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8717 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8718 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8719 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8720 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8721 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8722 pVCpu->iem.s.cActiveMappings++;
8723
8724 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8725 *ppvMem = pbBuf;
8726 return VINF_SUCCESS;
8727}
8728
8729
8730
8731/**
8732 * Maps the specified guest memory for the given kind of access.
8733 *
8734 * This may be using bounce buffering of the memory if it's crossing a page
8735 * boundary or if there is an access handler installed for any of it. Because
8736 * of lock prefix guarantees, we're in for some extra clutter when this
8737 * happens.
8738 *
8739 * This may raise a \#GP, \#SS, \#PF or \#AC.
8740 *
8741 * @returns VBox strict status code.
8742 *
8743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8744 * @param ppvMem Where to return the pointer to the mapped
8745 * memory.
8746 * @param cbMem The number of bytes to map. This is usually 1,
8747 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8748 * string operations it can be up to a page.
8749 * @param iSegReg The index of the segment register to use for
8750 * this access. The base and limits are checked.
8751 * Use UINT8_MAX to indicate that no segmentation
8752 * is required (for IDT, GDT and LDT accesses).
8753 * @param GCPtrMem The address of the guest memory.
8754 * @param fAccess How the memory is being accessed. The
8755 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8756 * how to map the memory, while the
8757 * IEM_ACCESS_WHAT_XXX bit is used when raising
8758 * exceptions.
8759 */
8760IEM_STATIC VBOXSTRICTRC
8761iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8762{
8763 /*
8764 * Check the input and figure out which mapping entry to use.
8765 */
8766 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8767 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8768 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8769
8770 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8771 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8772 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8773 {
8774 iMemMap = iemMemMapFindFree(pVCpu);
8775 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8776 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8777 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8778 pVCpu->iem.s.aMemMappings[2].fAccess),
8779 VERR_IEM_IPE_9);
8780 }
8781
8782 /*
8783 * Map the memory, checking that we can actually access it. If something
8784 * slightly complicated happens, fall back on bounce buffering.
8785 */
8786 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8787 if (rcStrict != VINF_SUCCESS)
8788 return rcStrict;
8789
8790 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8791 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8792
8793 RTGCPHYS GCPhysFirst;
8794 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8795 if (rcStrict != VINF_SUCCESS)
8796 return rcStrict;
8797
8798 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8799 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8800 if (fAccess & IEM_ACCESS_TYPE_READ)
8801 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8802
8803 void *pvMem;
8804 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8805 if (rcStrict != VINF_SUCCESS)
8806 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8807
8808 /*
8809 * Fill in the mapping table entry.
8810 */
8811 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8812 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8813 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8814 pVCpu->iem.s.cActiveMappings++;
8815
8816 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8817 *ppvMem = pvMem;
8818
8819 return VINF_SUCCESS;
8820}
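/*
 * Usage sketch (editor's addition; iSegReg, GCPtrMem and u32Value are
 * placeholders): a typical store path pairs iemMemMap with the
 * iemMemCommitAndUnmap function below, mirroring what the iemMemFetchDataUxx
 * helpers further down do for loads.
 *
 *     uint32_t *pu32Dst;
 *     VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
 *                                 iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
 *     if (rc == VINF_SUCCESS)
 *     {
 *         *pu32Dst = u32Value;
 *         rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
 *     }
 *     return rc;
 */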
8821
8822
8823/**
8824 * Commits the guest memory if bounce buffered and unmaps it.
8825 *
8826 * @returns Strict VBox status code.
8827 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8828 * @param pvMem The mapping.
8829 * @param fAccess The kind of access.
8830 */
8831IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8832{
8833 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8834 AssertReturn(iMemMap >= 0, iMemMap);
8835
8836 /* If it's bounce buffered, we may need to write back the buffer. */
8837 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8838 {
8839 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8840 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8841 }
8842 /* Otherwise unlock it. */
8843 else
8844 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8845
8846 /* Free the entry. */
8847 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8848 Assert(pVCpu->iem.s.cActiveMappings != 0);
8849 pVCpu->iem.s.cActiveMappings--;
8850 return VINF_SUCCESS;
8851}
8852
8853#ifdef IEM_WITH_SETJMP
8854
8855/**
8856 * Maps the specified guest memory for the given kind of access, longjmp on
8857 * error.
8858 *
8859 * This may be using bounce buffering of the memory if it's crossing a page
8860 * boundary or if there is an access handler installed for any of it. Because
8861 * of lock prefix guarantees, we're in for some extra clutter when this
8862 * happens.
8863 *
8864 * This may raise a \#GP, \#SS, \#PF or \#AC.
8865 *
8866 * @returns Pointer to the mapped memory.
8867 *
8868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8869 * @param cbMem The number of bytes to map. This is usually 1,
8870 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8871 * string operations it can be up to a page.
8872 * @param iSegReg The index of the segment register to use for
8873 * this access. The base and limits are checked.
8874 * Use UINT8_MAX to indicate that no segmentation
8875 * is required (for IDT, GDT and LDT accesses).
8876 * @param GCPtrMem The address of the guest memory.
8877 * @param fAccess How the memory is being accessed. The
8878 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8879 * how to map the memory, while the
8880 * IEM_ACCESS_WHAT_XXX bit is used when raising
8881 * exceptions.
8882 */
8883IEM_STATIC void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8884{
8885 /*
8886 * Check the input and figure out which mapping entry to use.
8887 */
8888 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8889 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8890 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8891
8892 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8893 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8894 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8895 {
8896 iMemMap = iemMemMapFindFree(pVCpu);
8897 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8898 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8899 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8900 pVCpu->iem.s.aMemMappings[2].fAccess),
8901 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8902 }
8903
8904 /*
8905 * Map the memory, checking that we can actually access it. If something
8906 * slightly complicated happens, fall back on bounce buffering.
8907 */
8908 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8909 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8910 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8911
8912 /* Crossing a page boundary? */
8913 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8914 { /* No (likely). */ }
8915 else
8916 {
8917 void *pvMem;
8918 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8919 if (rcStrict == VINF_SUCCESS)
8920 return pvMem;
8921 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8922 }
8923
8924 RTGCPHYS GCPhysFirst;
8925 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8926 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8927 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8928
8929 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8930 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8931 if (fAccess & IEM_ACCESS_TYPE_READ)
8932 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8933
8934 void *pvMem;
8935 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8936 if (rcStrict == VINF_SUCCESS)
8937 { /* likely */ }
8938 else
8939 {
8940 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8941 if (rcStrict == VINF_SUCCESS)
8942 return pvMem;
8943 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8944 }
8945
8946 /*
8947 * Fill in the mapping table entry.
8948 */
8949 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8950 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8951 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8952 pVCpu->iem.s.cActiveMappings++;
8953
8954 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8955 return pvMem;
8956}
8957
8958
8959/**
8960 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8961 *
8962 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8963 * @param pvMem The mapping.
8964 * @param fAccess The kind of access.
8965 */
8966IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8967{
8968 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8969 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8970
8971 /* If it's bounce buffered, we may need to write back the buffer. */
8972 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8973 {
8974 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8975 {
8976 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8977 if (rcStrict == VINF_SUCCESS)
8978 return;
8979 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8980 }
8981 }
8982 /* Otherwise unlock it. */
8983 else
8984 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8985
8986 /* Free the entry. */
8987 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8988 Assert(pVCpu->iem.s.cActiveMappings != 0);
8989 pVCpu->iem.s.cActiveMappings--;
8990}
8991
8992#endif /* IEM_WITH_SETJMP */
8993
8994#ifndef IN_RING3
8995/**
8996 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8997 * buffer part shows trouble, the write is postponed to ring-3 (VMCPU_FF_IEM is set).
8998 *
8999 * Allows the instruction to be completed and retired, while the IEM user will
9000 * return to ring-3 immediately afterwards and do the postponed writes there.
9001 *
9002 * @returns VBox status code (no strict statuses). Caller must check
9003 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9005 * @param pvMem The mapping.
9006 * @param fAccess The kind of access.
9007 */
9008IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
9009{
9010 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9011 AssertReturn(iMemMap >= 0, iMemMap);
9012
9013 /* If it's bounce buffered, we may need to write back the buffer. */
9014 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9015 {
9016 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9017 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9018 }
9019 /* Otherwise unlock it. */
9020 else
9021 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9022
9023 /* Free the entry. */
9024 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9025 Assert(pVCpu->iem.s.cActiveMappings != 0);
9026 pVCpu->iem.s.cActiveMappings--;
9027 return VINF_SUCCESS;
9028}
9029#endif
9030
9031
9032/**
9033 * Rolls back mappings, releasing page locks and such.
9034 *
9035 * The caller shall only call this after checking cActiveMappings.
9036 *
9038 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9039 */
9040IEM_STATIC void iemMemRollback(PVMCPUCC pVCpu)
9041{
9042 Assert(pVCpu->iem.s.cActiveMappings > 0);
9043
9044 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9045 while (iMemMap-- > 0)
9046 {
9047 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9048 if (fAccess != IEM_ACCESS_INVALID)
9049 {
9050 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9051 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9052 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9053 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9054 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9055 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9056 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9057 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9058 pVCpu->iem.s.cActiveMappings--;
9059 }
9060 }
9061}
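/*
 * Caller-side sketch (editor's addition; iemExecuteInstructionSketch is a
 * hypothetical stand-in for the actual execution loops): failures are expected
 * to be paired with a rollback, but only after checking cActiveMappings as
 * noted above.
 *
 *     VBOXSTRICTRC rcStrict = iemExecuteInstructionSketch(pVCpu);
 *     if (rcStrict != VINF_SUCCESS && pVCpu->iem.s.cActiveMappings > 0)
 *         iemMemRollback(pVCpu);
 */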
9062
9063
9064/**
9065 * Fetches a data byte.
9066 *
9067 * @returns Strict VBox status code.
9068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9069 * @param pu8Dst Where to return the byte.
9070 * @param iSegReg The index of the segment register to use for
9071 * this access. The base and limits are checked.
9072 * @param GCPtrMem The address of the guest memory.
9073 */
9074IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9075{
9076 /* The lazy approach for now... */
9077 uint8_t const *pu8Src;
9078 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9079 if (rc == VINF_SUCCESS)
9080 {
9081 *pu8Dst = *pu8Src;
9082 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9083 }
9084 return rc;
9085}
9086
9087
9088#ifdef IEM_WITH_SETJMP
9089/**
9090 * Fetches a data byte, longjmp on error.
9091 *
9092 * @returns The byte.
9093 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9094 * @param iSegReg The index of the segment register to use for
9095 * this access. The base and limits are checked.
9096 * @param GCPtrMem The address of the guest memory.
9097 */
9098DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9099{
9100 /* The lazy approach for now... */
9101 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9102 uint8_t const bRet = *pu8Src;
9103 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9104 return bRet;
9105}
9106#endif /* IEM_WITH_SETJMP */
9107
9108
9109/**
9110 * Fetches a data word.
9111 *
9112 * @returns Strict VBox status code.
9113 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9114 * @param pu16Dst Where to return the word.
9115 * @param iSegReg The index of the segment register to use for
9116 * this access. The base and limits are checked.
9117 * @param GCPtrMem The address of the guest memory.
9118 */
9119IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9120{
9121 /* The lazy approach for now... */
9122 uint16_t const *pu16Src;
9123 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9124 if (rc == VINF_SUCCESS)
9125 {
9126 *pu16Dst = *pu16Src;
9127 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9128 }
9129 return rc;
9130}
9131
9132
9133#ifdef IEM_WITH_SETJMP
9134/**
9135 * Fetches a data word, longjmp on error.
9136 *
9137 * @returns The word.
9138 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9139 * @param iSegReg The index of the segment register to use for
9140 * this access. The base and limits are checked.
9141 * @param GCPtrMem The address of the guest memory.
9142 */
9143DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9144{
9145 /* The lazy approach for now... */
9146 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9147 uint16_t const u16Ret = *pu16Src;
9148 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9149 return u16Ret;
9150}
9151#endif
9152
9153
9154/**
9155 * Fetches a data dword.
9156 *
9157 * @returns Strict VBox status code.
9158 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9159 * @param pu32Dst Where to return the dword.
9160 * @param iSegReg The index of the segment register to use for
9161 * this access. The base and limits are checked.
9162 * @param GCPtrMem The address of the guest memory.
9163 */
9164IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9165{
9166 /* The lazy approach for now... */
9167 uint32_t const *pu32Src;
9168 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9169 if (rc == VINF_SUCCESS)
9170 {
9171 *pu32Dst = *pu32Src;
9172 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9173 }
9174 return rc;
9175}
9176
9177
9178/**
9179 * Fetches a data dword and zero extends it to a qword.
9180 *
9181 * @returns Strict VBox status code.
9182 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9183 * @param pu64Dst Where to return the qword.
9184 * @param iSegReg The index of the segment register to use for
9185 * this access. The base and limits are checked.
9186 * @param GCPtrMem The address of the guest memory.
9187 */
9188IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9189{
9190 /* The lazy approach for now... */
9191 uint32_t const *pu32Src;
9192 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9193 if (rc == VINF_SUCCESS)
9194 {
9195 *pu64Dst = *pu32Src;
9196 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9197 }
9198 return rc;
9199}
9200
9201
9202#ifdef IEM_WITH_SETJMP
9203
9204IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9205{
9206 Assert(cbMem >= 1);
9207 Assert(iSegReg < X86_SREG_COUNT);
9208
9209 /*
9210 * 64-bit mode is simpler.
9211 */
9212 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9213 {
9214 if (iSegReg >= X86_SREG_FS)
9215 {
9216 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9217 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9218 GCPtrMem += pSel->u64Base;
9219 }
9220
9221 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9222 return GCPtrMem;
9223 }
9224 /*
9225 * 16-bit and 32-bit segmentation.
9226 */
9227 else
9228 {
9229 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9230 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9231 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9232 == X86DESCATTR_P /* data, expand up */
9233 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9234 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9235 {
9236 /* expand up */
9237 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9238 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9239 && GCPtrLast32 > (uint32_t)GCPtrMem))
9240 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9241 }
9242 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9243 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9244 {
9245 /* expand down */
9246 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9247 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9248 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9249 && GCPtrLast32 > (uint32_t)GCPtrMem))
9250 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9251 }
9252 else
9253 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9254 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9255 }
9256 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9257}
9258
9259
9260IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9261{
9262 Assert(cbMem >= 1);
9263 Assert(iSegReg < X86_SREG_COUNT);
9264
9265 /*
9266 * 64-bit mode is simpler.
9267 */
9268 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9269 {
9270 if (iSegReg >= X86_SREG_FS)
9271 {
9272 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9273 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9274 GCPtrMem += pSel->u64Base;
9275 }
9276
9277 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9278 return GCPtrMem;
9279 }
9280 /*
9281 * 16-bit and 32-bit segmentation.
9282 */
9283 else
9284 {
9285 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9286 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9287 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9288 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9289 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9290 {
9291 /* expand up */
9292 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9293 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9294 && GCPtrLast32 > (uint32_t)GCPtrMem))
9295 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9296 }
9297 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9298 {
9299 /* expand down */
9300 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9301 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9302 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9303 && GCPtrLast32 > (uint32_t)GCPtrMem))
9304 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9305 }
9306 else
9307 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9308 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9309 }
9310 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9311}
9312
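/*
 * Editor's illustrative sketch, not part of the original file: the expand-down
 * bounds rule used by the two segment helpers above, written as a stand-alone
 * predicate. The first byte must lie strictly above the limit, the exclusive
 * end must not pass the 0xffff/0xffffffff ceiling selected by the D/B bit, and
 * the access must not wrap. The function and parameter names are made up.
 */
#if 0 /* documentation-only example */
static bool exampleIsExpandDownAccessValid(uint32_t offFirst, uint32_t cbAccess, uint32_t uLimit, bool fDefBig)
{
    uint32_t const offEnd = offFirst + cbAccess;                     /* exclusive end, like GCPtrLast32 above */
    uint32_t const uCeil  = fDefBig ? UINT32_MAX : UINT32_C(0xffff); /* upper boundary set by the D/B bit */
    return offFirst > uLimit     /* first byte must be above the limit */
        && offEnd   <= uCeil     /* end must not pass the ceiling */
        && offEnd   >  offFirst; /* and the access must not wrap around */
}
/* E.g. with uLimit=0xfff and fDefBig=true, a dword access at 0xffc is rejected while one at 0x1000 is accepted. */
#endif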
9313
9314/**
9315 * Fetches a data dword, longjmp on error, fallback/safe version.
9316 *
9317 * @returns The dword
9318 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9319 * @param iSegReg The index of the segment register to use for
9320 * this access. The base and limits are checked.
9321 * @param GCPtrMem The address of the guest memory.
9322 */
9323IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9324{
9325 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9326 uint32_t const u32Ret = *pu32Src;
9327 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9328 return u32Ret;
9329}
9330
9331
9332/**
9333 * Fetches a data dword, longjmp on error.
9334 *
9335 * @returns The dword
9336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9337 * @param iSegReg The index of the segment register to use for
9338 * this access. The base and limits are checked.
9339 * @param GCPtrMem The address of the guest memory.
9340 */
9341DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9342{
9343# ifdef IEM_WITH_DATA_TLB
9344 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9345 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9346 {
9347 /// @todo more later.
9348 }
9349
9350 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9351# else
9352 /* The lazy approach. */
9353 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9354 uint32_t const u32Ret = *pu32Src;
9355 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9356 return u32Ret;
9357# endif
9358}
9359#endif
9360
9361
9362#ifdef SOME_UNUSED_FUNCTION
9363/**
9364 * Fetches a data dword and sign extends it to a qword.
9365 *
9366 * @returns Strict VBox status code.
9367 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9368 * @param pu64Dst Where to return the sign extended value.
9369 * @param iSegReg The index of the segment register to use for
9370 * this access. The base and limits are checked.
9371 * @param GCPtrMem The address of the guest memory.
9372 */
9373IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9374{
9375 /* The lazy approach for now... */
9376 int32_t const *pi32Src;
9377 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9378 if (rc == VINF_SUCCESS)
9379 {
9380 *pu64Dst = *pi32Src;
9381 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9382 }
9383#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9384 else
9385 *pu64Dst = 0;
9386#endif
9387 return rc;
9388}
9389#endif
9390
9391
9392/**
9393 * Fetches a data qword.
9394 *
9395 * @returns Strict VBox status code.
9396 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9397 * @param pu64Dst Where to return the qword.
9398 * @param iSegReg The index of the segment register to use for
9399 * this access. The base and limits are checked.
9400 * @param GCPtrMem The address of the guest memory.
9401 */
9402IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9403{
9404 /* The lazy approach for now... */
9405 uint64_t const *pu64Src;
9406 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9407 if (rc == VINF_SUCCESS)
9408 {
9409 *pu64Dst = *pu64Src;
9410 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9411 }
9412 return rc;
9413}
9414
9415
9416#ifdef IEM_WITH_SETJMP
9417/**
9418 * Fetches a data qword, longjmp on error.
9419 *
9420 * @returns The qword.
9421 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9422 * @param iSegReg The index of the segment register to use for
9423 * this access. The base and limits are checked.
9424 * @param GCPtrMem The address of the guest memory.
9425 */
9426DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9427{
9428 /* The lazy approach for now... */
9429 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9430 uint64_t const u64Ret = *pu64Src;
9431 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9432 return u64Ret;
9433}
9434#endif
9435
9436
9437/**
9438 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9439 *
9440 * @returns Strict VBox status code.
9441 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9442 * @param pu64Dst Where to return the qword.
9443 * @param iSegReg The index of the segment register to use for
9444 * this access. The base and limits are checked.
9445 * @param GCPtrMem The address of the guest memory.
9446 */
9447IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9448{
9449 /* The lazy approach for now... */
9450 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9451 if (RT_UNLIKELY(GCPtrMem & 15))
9452 return iemRaiseGeneralProtectionFault0(pVCpu);
9453
9454 uint64_t const *pu64Src;
9455 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9456 if (rc == VINF_SUCCESS)
9457 {
9458 *pu64Dst = *pu64Src;
9459 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9460 }
9461 return rc;
9462}
9463
9464
9465#ifdef IEM_WITH_SETJMP
9466/**
9467 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9468 *
9469 * @returns The qword.
9470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9471 * @param iSegReg The index of the segment register to use for
9472 * this access. The base and limits are checked.
9473 * @param GCPtrMem The address of the guest memory.
9474 */
9475DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9476{
9477 /* The lazy approach for now... */
9478 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9479 if (RT_LIKELY(!(GCPtrMem & 15)))
9480 {
9481 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9482 uint64_t const u64Ret = *pu64Src;
9483 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9484 return u64Ret;
9485 }
9486
9487 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9488 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9489}
9490#endif
9491
9492
9493/**
9494 * Fetches a data tword.
9495 *
9496 * @returns Strict VBox status code.
9497 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9498 * @param pr80Dst Where to return the tword.
9499 * @param iSegReg The index of the segment register to use for
9500 * this access. The base and limits are checked.
9501 * @param GCPtrMem The address of the guest memory.
9502 */
9503IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9504{
9505 /* The lazy approach for now... */
9506 PCRTFLOAT80U pr80Src;
9507 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9508 if (rc == VINF_SUCCESS)
9509 {
9510 *pr80Dst = *pr80Src;
9511 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9512 }
9513 return rc;
9514}
9515
9516
9517#ifdef IEM_WITH_SETJMP
9518/**
9519 * Fetches a data tword, longjmp on error.
9520 *
9521 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9522 * @param pr80Dst Where to return the tword.
9523 * @param iSegReg The index of the segment register to use for
9524 * this access. The base and limits are checked.
9525 * @param GCPtrMem The address of the guest memory.
9526 */
9527DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9528{
9529 /* The lazy approach for now... */
9530 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9531 *pr80Dst = *pr80Src;
9532 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9533}
9534#endif
9535
9536
9537/**
9538 * Fetches a data dqword (double qword), generally SSE related.
9539 *
9540 * @returns Strict VBox status code.
9541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9542 * @param pu128Dst Where to return the dqword.
9543 * @param iSegReg The index of the segment register to use for
9544 * this access. The base and limits are checked.
9545 * @param GCPtrMem The address of the guest memory.
9546 */
9547IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9548{
9549 /* The lazy approach for now... */
9550 PCRTUINT128U pu128Src;
9551 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9552 if (rc == VINF_SUCCESS)
9553 {
9554 pu128Dst->au64[0] = pu128Src->au64[0];
9555 pu128Dst->au64[1] = pu128Src->au64[1];
9556 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9557 }
9558 return rc;
9559}
9560
9561
9562#ifdef IEM_WITH_SETJMP
9563/**
9564 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9565 *
9566 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9567 * @param pu128Dst Where to return the dqword.
9568 * @param iSegReg The index of the segment register to use for
9569 * this access. The base and limits are checked.
9570 * @param GCPtrMem The address of the guest memory.
9571 */
9572IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9573{
9574 /* The lazy approach for now... */
9575 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9576 pu128Dst->au64[0] = pu128Src->au64[0];
9577 pu128Dst->au64[1] = pu128Src->au64[1];
9578 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9579}
9580#endif
9581
9582
9583/**
9584 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9585 * related.
9586 *
9587 * Raises \#GP(0) if not aligned.
9588 *
9589 * @returns Strict VBox status code.
9590 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9591 * @param pu128Dst Where to return the dqword.
9592 * @param iSegReg The index of the segment register to use for
9593 * this access. The base and limits are checked.
9594 * @param GCPtrMem The address of the guest memory.
9595 */
9596IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9597{
9598 /* The lazy approach for now... */
9599 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9600 if ( (GCPtrMem & 15)
9601 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9602 return iemRaiseGeneralProtectionFault0(pVCpu);
9603
9604 PCRTUINT128U pu128Src;
9605 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9606 if (rc == VINF_SUCCESS)
9607 {
9608 pu128Dst->au64[0] = pu128Src->au64[0];
9609 pu128Dst->au64[1] = pu128Src->au64[1];
9610 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9611 }
9612 return rc;
9613}
9614
9615
9616#ifdef IEM_WITH_SETJMP
9617/**
9618 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9619 * related, longjmp on error.
9620 *
9621 * Raises \#GP(0) if not aligned.
9622 *
9623 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9624 * @param pu128Dst Where to return the dqword.
9625 * @param iSegReg The index of the segment register to use for
9626 * this access. The base and limits are checked.
9627 * @param GCPtrMem The address of the guest memory.
9628 */
9629DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9630{
9631 /* The lazy approach for now... */
9632 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9633 if ( (GCPtrMem & 15) == 0
9634 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9635 {
9636 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9637 pu128Dst->au64[0] = pu128Src->au64[0];
9638 pu128Dst->au64[1] = pu128Src->au64[1];
9639 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9640 return;
9641 }
9642
9643 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9644 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9645}
9646#endif
9647
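/*
 * Editor's note with an illustrative sketch, not part of the original file:
 * the alignment rule the SSE-aligned fetch/store helpers in this file apply,
 * expressed as a stand-alone predicate. A 16-byte aligned address is always
 * accepted; an unaligned one is tolerated only when MXCSR.MM is set (the
 * misaligned-SSE mode some AMD CPUs expose). The function name is made up.
 */
#if 0 /* documentation-only example */
static bool exampleIsSseAlignmentOk(RTGCPTR GCPtrMem, uint32_t fMxcsr)
{
    return (GCPtrMem & 15) == 0          /* naturally aligned for a 128-bit access */
        || (fMxcsr & X86_MXCSR_MM) != 0; /* or misaligned accesses are explicitly allowed */
}
#endif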
9648
9649/**
9650 * Fetches a data oword (octo word), generally AVX related.
9651 *
9652 * @returns Strict VBox status code.
9653 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9654 * @param pu256Dst Where to return the oword.
9655 * @param iSegReg The index of the segment register to use for
9656 * this access. The base and limits are checked.
9657 * @param GCPtrMem The address of the guest memory.
9658 */
9659IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9660{
9661 /* The lazy approach for now... */
9662 PCRTUINT256U pu256Src;
9663 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9664 if (rc == VINF_SUCCESS)
9665 {
9666 pu256Dst->au64[0] = pu256Src->au64[0];
9667 pu256Dst->au64[1] = pu256Src->au64[1];
9668 pu256Dst->au64[2] = pu256Src->au64[2];
9669 pu256Dst->au64[3] = pu256Src->au64[3];
9670 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9671 }
9672 return rc;
9673}
9674
9675
9676#ifdef IEM_WITH_SETJMP
9677/**
9678 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9679 *
9680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9681 * @param pu256Dst Where to return the oword.
9682 * @param iSegReg The index of the segment register to use for
9683 * this access. The base and limits are checked.
9684 * @param GCPtrMem The address of the guest memory.
9685 */
9686IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9687{
9688 /* The lazy approach for now... */
9689 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9690 pu256Dst->au64[0] = pu256Src->au64[0];
9691 pu256Dst->au64[1] = pu256Src->au64[1];
9692 pu256Dst->au64[2] = pu256Src->au64[2];
9693 pu256Dst->au64[3] = pu256Src->au64[3];
9694 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9695}
9696#endif
9697
9698
9699/**
9700 * Fetches a data oword (octo word) at an aligned address, generally AVX
9701 * related.
9702 *
9703 * Raises \#GP(0) if not aligned.
9704 *
9705 * @returns Strict VBox status code.
9706 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9707 * @param pu256Dst Where to return the oword.
9708 * @param iSegReg The index of the segment register to use for
9709 * this access. The base and limits are checked.
9710 * @param GCPtrMem The address of the guest memory.
9711 */
9712IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9713{
9714 /* The lazy approach for now... */
9715 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9716 if (GCPtrMem & 31)
9717 return iemRaiseGeneralProtectionFault0(pVCpu);
9718
9719 PCRTUINT256U pu256Src;
9720 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9721 if (rc == VINF_SUCCESS)
9722 {
9723 pu256Dst->au64[0] = pu256Src->au64[0];
9724 pu256Dst->au64[1] = pu256Src->au64[1];
9725 pu256Dst->au64[2] = pu256Src->au64[2];
9726 pu256Dst->au64[3] = pu256Src->au64[3];
9727 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9728 }
9729 return rc;
9730}
9731
9732
9733#ifdef IEM_WITH_SETJMP
9734/**
9735 * Fetches a data oword (octo word) at an aligned address, generally AVX
9736 * related, longjmp on error.
9737 *
9738 * Raises \#GP(0) if not aligned.
9739 *
9740 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9741 * @param pu256Dst Where to return the oword.
9742 * @param iSegReg The index of the segment register to use for
9743 * this access. The base and limits are checked.
9744 * @param GCPtrMem The address of the guest memory.
9745 */
9746DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9747{
9748 /* The lazy approach for now... */
9749 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9750 if ((GCPtrMem & 31) == 0)
9751 {
9752 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9753 pu256Dst->au64[0] = pu256Src->au64[0];
9754 pu256Dst->au64[1] = pu256Src->au64[1];
9755 pu256Dst->au64[2] = pu256Src->au64[2];
9756 pu256Dst->au64[3] = pu256Src->au64[3];
9757 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9758 return;
9759 }
9760
9761 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9762 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9763}
9764#endif
9765
9766
9767
9768/**
9769 * Fetches a descriptor register (lgdt, lidt).
9770 *
9771 * @returns Strict VBox status code.
9772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9773 * @param pcbLimit Where to return the limit.
9774 * @param pGCPtrBase Where to return the base.
9775 * @param iSegReg The index of the segment register to use for
9776 * this access. The base and limits are checked.
9777 * @param GCPtrMem The address of the guest memory.
9778 * @param enmOpSize The effective operand size.
9779 */
9780IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9781 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9782{
9783 /*
9784 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9785 * little special:
9786 * - The two reads are done separately.
9787 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9788 * - We suspect the 386 to actually commit the limit before the base in
9789 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9790 * don't try to emulate this eccentric behavior, because it's not well
9791 * enough understood and rather hard to trigger.
9792 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9793 */
9794 VBOXSTRICTRC rcStrict;
9795 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9796 {
9797 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9798 if (rcStrict == VINF_SUCCESS)
9799 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9800 }
9801 else
9802 {
9803 uint32_t uTmp = 0; /* (silences a Visual C++ maybe-used-uninitialized warning) */
9804 if (enmOpSize == IEMMODE_32BIT)
9805 {
9806 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9807 {
9808 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9809 if (rcStrict == VINF_SUCCESS)
9810 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9811 }
9812 else
9813 {
9814 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9815 if (rcStrict == VINF_SUCCESS)
9816 {
9817 *pcbLimit = (uint16_t)uTmp;
9818 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9819 }
9820 }
9821 if (rcStrict == VINF_SUCCESS)
9822 *pGCPtrBase = uTmp;
9823 }
9824 else
9825 {
9826 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9827 if (rcStrict == VINF_SUCCESS)
9828 {
9829 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9830 if (rcStrict == VINF_SUCCESS)
9831 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9832 }
9833 }
9834 }
9835 return rcStrict;
9836}
9837
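/*
 * Editor's illustrative sketch, not part of the original file: roughly how a
 * caller emulating LGDT/LIDT might drive iemMemFetchDataXdtr. The instruction
 * plumbing (operand decoding, committing the descriptor-table register,
 * advancing RIP) is elided; the function name below is hypothetical and only
 * meant to show the calling convention.
 */
#if 0 /* documentation-only example */
static VBOXSTRICTRC exampleLoadDescTableReg(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrEffSrc, IEMMODE enmOpSize)
{
    uint16_t cbLimit;
    RTGCPTR  GCPtrBase;
    VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iSegReg, GCPtrEffSrc, enmOpSize);
    if (rcStrict == VINF_SUCCESS)
    {
        /* ... commit cbLimit and GCPtrBase to the guest GDTR/IDTR here ... */
    }
    return rcStrict;
}
#endif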
9838
9839
9840/**
9841 * Stores a data byte.
9842 *
9843 * @returns Strict VBox status code.
9844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9845 * @param iSegReg The index of the segment register to use for
9846 * this access. The base and limits are checked.
9847 * @param GCPtrMem The address of the guest memory.
9848 * @param u8Value The value to store.
9849 */
9850IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9851{
9852 /* The lazy approach for now... */
9853 uint8_t *pu8Dst;
9854 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9855 if (rc == VINF_SUCCESS)
9856 {
9857 *pu8Dst = u8Value;
9858 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9859 }
9860 return rc;
9861}
9862
9863
9864#ifdef IEM_WITH_SETJMP
9865/**
9866 * Stores a data byte, longjmp on error.
9867 *
9868 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9869 * @param iSegReg The index of the segment register to use for
9870 * this access. The base and limits are checked.
9871 * @param GCPtrMem The address of the guest memory.
9872 * @param u8Value The value to store.
9873 */
9874IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9875{
9876 /* The lazy approach for now... */
9877 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9878 *pu8Dst = u8Value;
9879 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9880}
9881#endif
9882
9883
9884/**
9885 * Stores a data word.
9886 *
9887 * @returns Strict VBox status code.
9888 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9889 * @param iSegReg The index of the segment register to use for
9890 * this access. The base and limits are checked.
9891 * @param GCPtrMem The address of the guest memory.
9892 * @param u16Value The value to store.
9893 */
9894IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9895{
9896 /* The lazy approach for now... */
9897 uint16_t *pu16Dst;
9898 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9899 if (rc == VINF_SUCCESS)
9900 {
9901 *pu16Dst = u16Value;
9902 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9903 }
9904 return rc;
9905}
9906
9907
9908#ifdef IEM_WITH_SETJMP
9909/**
9910 * Stores a data word, longjmp on error.
9911 *
9912 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9913 * @param iSegReg The index of the segment register to use for
9914 * this access. The base and limits are checked.
9915 * @param GCPtrMem The address of the guest memory.
9916 * @param u16Value The value to store.
9917 */
9918IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9919{
9920 /* The lazy approach for now... */
9921 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9922 *pu16Dst = u16Value;
9923 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9924}
9925#endif
9926
9927
9928/**
9929 * Stores a data dword.
9930 *
9931 * @returns Strict VBox status code.
9932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9933 * @param iSegReg The index of the segment register to use for
9934 * this access. The base and limits are checked.
9935 * @param GCPtrMem The address of the guest memory.
9936 * @param u32Value The value to store.
9937 */
9938IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9939{
9940 /* The lazy approach for now... */
9941 uint32_t *pu32Dst;
9942 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9943 if (rc == VINF_SUCCESS)
9944 {
9945 *pu32Dst = u32Value;
9946 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9947 }
9948 return rc;
9949}
9950
9951
9952#ifdef IEM_WITH_SETJMP
9953/**
9954 * Stores a data dword, longjmp on error.
9955 *
9956 * @returns Strict VBox status code.
9957 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9958 * @param iSegReg The index of the segment register to use for
9959 * this access. The base and limits are checked.
9960 * @param GCPtrMem The address of the guest memory.
9961 * @param u32Value The value to store.
9962 */
9963IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9964{
9965 /* The lazy approach for now... */
9966 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9967 *pu32Dst = u32Value;
9968 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9969}
9970#endif
9971
9972
9973/**
9974 * Stores a data qword.
9975 *
9976 * @returns Strict VBox status code.
9977 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9978 * @param iSegReg The index of the segment register to use for
9979 * this access. The base and limits are checked.
9980 * @param GCPtrMem The address of the guest memory.
9981 * @param u64Value The value to store.
9982 */
9983IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9984{
9985 /* The lazy approach for now... */
9986 uint64_t *pu64Dst;
9987 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9988 if (rc == VINF_SUCCESS)
9989 {
9990 *pu64Dst = u64Value;
9991 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9992 }
9993 return rc;
9994}
9995
9996
9997#ifdef IEM_WITH_SETJMP
9998/**
9999 * Stores a data qword, longjmp on error.
10000 *
10001 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10002 * @param iSegReg The index of the segment register to use for
10003 * this access. The base and limits are checked.
10004 * @param GCPtrMem The address of the guest memory.
10005 * @param u64Value The value to store.
10006 */
10007IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10008{
10009 /* The lazy approach for now... */
10010 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10011 *pu64Dst = u64Value;
10012 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10013}
10014#endif
10015
10016
10017/**
10018 * Stores a data dqword.
10019 *
10020 * @returns Strict VBox status code.
10021 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10022 * @param iSegReg The index of the segment register to use for
10023 * this access. The base and limits are checked.
10024 * @param GCPtrMem The address of the guest memory.
10025 * @param u128Value The value to store.
10026 */
10027IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10028{
10029 /* The lazy approach for now... */
10030 PRTUINT128U pu128Dst;
10031 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10032 if (rc == VINF_SUCCESS)
10033 {
10034 pu128Dst->au64[0] = u128Value.au64[0];
10035 pu128Dst->au64[1] = u128Value.au64[1];
10036 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10037 }
10038 return rc;
10039}
10040
10041
10042#ifdef IEM_WITH_SETJMP
10043/**
10044 * Stores a data dqword, longjmp on error.
10045 *
10046 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10047 * @param iSegReg The index of the segment register to use for
10048 * this access. The base and limits are checked.
10049 * @param GCPtrMem The address of the guest memory.
10050 * @param u128Value The value to store.
10051 */
10052IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10053{
10054 /* The lazy approach for now... */
10055 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10056 pu128Dst->au64[0] = u128Value.au64[0];
10057 pu128Dst->au64[1] = u128Value.au64[1];
10058 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10059}
10060#endif
10061
10062
10063/**
10064 * Stores a data dqword, SSE aligned.
10065 *
10066 * @returns Strict VBox status code.
10067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10068 * @param iSegReg The index of the segment register to use for
10069 * this access. The base and limits are checked.
10070 * @param GCPtrMem The address of the guest memory.
10071 * @param u128Value The value to store.
10072 */
10073IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10074{
10075 /* The lazy approach for now... */
10076 if ( (GCPtrMem & 15)
10077 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10078 return iemRaiseGeneralProtectionFault0(pVCpu);
10079
10080 PRTUINT128U pu128Dst;
10081 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10082 if (rc == VINF_SUCCESS)
10083 {
10084 pu128Dst->au64[0] = u128Value.au64[0];
10085 pu128Dst->au64[1] = u128Value.au64[1];
10086 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10087 }
10088 return rc;
10089}
10090
10091
10092#ifdef IEM_WITH_SETJMP
10093/**
10094 * Stores a data dqword, SSE aligned, longjmp on error.
10095 *
10096 * @returns Strict VBox status code.
10097 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10098 * @param iSegReg The index of the segment register to use for
10099 * this access. The base and limits are checked.
10100 * @param GCPtrMem The address of the guest memory.
10101 * @param u128Value The value to store.
10102 */
10103DECL_NO_INLINE(IEM_STATIC, void)
10104iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10105{
10106 /* The lazy approach for now... */
10107 if ( (GCPtrMem & 15) == 0
10108 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10109 {
10110 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10111 pu128Dst->au64[0] = u128Value.au64[0];
10112 pu128Dst->au64[1] = u128Value.au64[1];
10113 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10114 return;
10115 }
10116
10117 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10118 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10119}
10120#endif
10121
10122
10123/**
10124 * Stores a data oword (octo word).
10125 *
10126 * @returns Strict VBox status code.
10127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10128 * @param iSegReg The index of the segment register to use for
10129 * this access. The base and limits are checked.
10130 * @param GCPtrMem The address of the guest memory.
10131 * @param pu256Value Pointer to the value to store.
10132 */
10133IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10134{
10135 /* The lazy approach for now... */
10136 PRTUINT256U pu256Dst;
10137 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10138 if (rc == VINF_SUCCESS)
10139 {
10140 pu256Dst->au64[0] = pu256Value->au64[0];
10141 pu256Dst->au64[1] = pu256Value->au64[1];
10142 pu256Dst->au64[2] = pu256Value->au64[2];
10143 pu256Dst->au64[3] = pu256Value->au64[3];
10144 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10145 }
10146 return rc;
10147}
10148
10149
10150#ifdef IEM_WITH_SETJMP
10151/**
10152 * Stores a data oword (octo word), longjmp on error.
10153 *
10154 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10155 * @param iSegReg The index of the segment register to use for
10156 * this access. The base and limits are checked.
10157 * @param GCPtrMem The address of the guest memory.
10158 * @param pu256Value Pointer to the value to store.
10159 */
10160IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10161{
10162 /* The lazy approach for now... */
10163 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10164 pu256Dst->au64[0] = pu256Value->au64[0];
10165 pu256Dst->au64[1] = pu256Value->au64[1];
10166 pu256Dst->au64[2] = pu256Value->au64[2];
10167 pu256Dst->au64[3] = pu256Value->au64[3];
10168 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10169}
10170#endif
10171
10172
10173/**
10174 * Stores a data oword (octo word), AVX aligned.
10175 *
10176 * @returns Strict VBox status code.
10177 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10178 * @param iSegReg The index of the segment register to use for
10179 * this access. The base and limits are checked.
10180 * @param GCPtrMem The address of the guest memory.
10181 * @param pu256Value Pointer to the value to store.
10182 */
10183IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10184{
10185 /* The lazy approach for now... */
10186 if (GCPtrMem & 31)
10187 return iemRaiseGeneralProtectionFault0(pVCpu);
10188
10189 PRTUINT256U pu256Dst;
10190 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10191 if (rc == VINF_SUCCESS)
10192 {
10193 pu256Dst->au64[0] = pu256Value->au64[0];
10194 pu256Dst->au64[1] = pu256Value->au64[1];
10195 pu256Dst->au64[2] = pu256Value->au64[2];
10196 pu256Dst->au64[3] = pu256Value->au64[3];
10197 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10198 }
10199 return rc;
10200}
10201
10202
10203#ifdef IEM_WITH_SETJMP
10204/**
10205 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10206 *
10207 * @returns Strict VBox status code.
10208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10209 * @param iSegReg The index of the segment register to use for
10210 * this access. The base and limits are checked.
10211 * @param GCPtrMem The address of the guest memory.
10212 * @param pu256Value Pointer to the value to store.
10213 */
10214DECL_NO_INLINE(IEM_STATIC, void)
10215iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10216{
10217 /* The lazy approach for now... */
10218 if ((GCPtrMem & 31) == 0)
10219 {
10220 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10221 pu256Dst->au64[0] = pu256Value->au64[0];
10222 pu256Dst->au64[1] = pu256Value->au64[1];
10223 pu256Dst->au64[2] = pu256Value->au64[2];
10224 pu256Dst->au64[3] = pu256Value->au64[3];
10225 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10226 return;
10227 }
10228
10229 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10230 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10231}
10232#endif
10233
10234
10235/**
10236 * Stores a descriptor register (sgdt, sidt).
10237 *
10238 * @returns Strict VBox status code.
10239 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10240 * @param cbLimit The limit.
10241 * @param GCPtrBase The base address.
10242 * @param iSegReg The index of the segment register to use for
10243 * this access. The base and limits are checked.
10244 * @param GCPtrMem The address of the guest memory.
10245 */
10246IEM_STATIC VBOXSTRICTRC
10247iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10248{
10249 /*
10250 * The SIDT and SGDT instructions actually store the data using two
10251 * independent writes. The instructions do not respond to opsize prefixes.
10252 */
10253 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10254 if (rcStrict == VINF_SUCCESS)
10255 {
10256 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10257 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10258 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10259 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10260 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10261 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10262 else
10263 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10264 }
10265 return rcStrict;
10266}
10267
10268
10269/**
10270 * Pushes a word onto the stack.
10271 *
10272 * @returns Strict VBox status code.
10273 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10274 * @param u16Value The value to push.
10275 */
10276IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value)
10277{
10278 /* Decrement the stack pointer. */
10279 uint64_t uNewRsp;
10280 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10281
10282 /* Write the word the lazy way. */
10283 uint16_t *pu16Dst;
10284 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10285 if (rc == VINF_SUCCESS)
10286 {
10287 *pu16Dst = u16Value;
10288 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10289 }
10290
10291 /* Commit the new RSP value unless an access handler made trouble. */
10292 if (rc == VINF_SUCCESS)
10293 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10294
10295 return rc;
10296}
10297
10298
10299/**
10300 * Pushes a dword onto the stack.
10301 *
10302 * @returns Strict VBox status code.
10303 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10304 * @param u32Value The value to push.
10305 */
10306IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value)
10307{
10308 /* Decrement the stack pointer. */
10309 uint64_t uNewRsp;
10310 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10311
10312 /* Write the dword the lazy way. */
10313 uint32_t *pu32Dst;
10314 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10315 if (rc == VINF_SUCCESS)
10316 {
10317 *pu32Dst = u32Value;
10318 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10319 }
10320
10321 /* Commit the new RSP value unless an access handler made trouble. */
10322 if (rc == VINF_SUCCESS)
10323 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10324
10325 return rc;
10326}
10327
10328
10329/**
10330 * Pushes a dword segment register value onto the stack.
10331 *
10332 * @returns Strict VBox status code.
10333 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10334 * @param u32Value The value to push.
10335 */
10336IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value)
10337{
10338 /* Decrement the stack pointer. */
10339 uint64_t uNewRsp;
10340 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10341
10342 /* The Intel docs talk about zero extending the selector register
10343 value. My actual Intel CPU here might be zero extending the value,
10344 but it still only writes the lower word... */
10345 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10346 * happens when crossing an electric page boundary: is the high word checked
10347 * for write accessibility or not? Probably it is. What about segment limits?
10348 * It appears this behavior is also shared with trap error codes.
10349 *
10350 * Docs indicate the behavior may have changed with the Pentium or Pentium Pro. Check
10351 * on ancient hardware to see when it actually changed. */
10352 uint16_t *pu16Dst;
10353 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10354 if (rc == VINF_SUCCESS)
10355 {
10356 *pu16Dst = (uint16_t)u32Value;
10357 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10358 }
10359
10360 /* Commit the new RSP value unless an access handler made trouble. */
10361 if (rc == VINF_SUCCESS)
10362 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10363
10364 return rc;
10365}
10366
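/*
 * Editor's note, not part of the original file: a worked example of the
 * behaviour implemented above. With a 32-bit stack, ESP=0x1000 and a selector
 * value of 0x0008, the push reserves the dword at 0x0ffc..0x0fff but only the
 * low word is written, so 0x0ffc..0x0ffd become 08 00 while 0x0ffe..0x0fff
 * keep their previous contents; ESP still drops by 4 to 0x0ffc.
 */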
10367
10368/**
10369 * Pushes a qword onto the stack.
10370 *
10371 * @returns Strict VBox status code.
10372 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10373 * @param u64Value The value to push.
10374 */
10375IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value)
10376{
10377 /* Decrement the stack pointer. */
10378 uint64_t uNewRsp;
10379 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10380
10381 /* Write the qword the lazy way. */
10382 uint64_t *pu64Dst;
10383 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10384 if (rc == VINF_SUCCESS)
10385 {
10386 *pu64Dst = u64Value;
10387 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10388 }
10389
10390 /* Commit the new RSP value unless an access handler made trouble. */
10391 if (rc == VINF_SUCCESS)
10392 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10393
10394 return rc;
10395}
10396
10397
10398/**
10399 * Pops a word from the stack.
10400 *
10401 * @returns Strict VBox status code.
10402 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10403 * @param pu16Value Where to store the popped value.
10404 */
10405IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value)
10406{
10407 /* Increment the stack pointer. */
10408 uint64_t uNewRsp;
10409 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10410
10411 /* Read the word the lazy way. */
10412 uint16_t const *pu16Src;
10413 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10414 if (rc == VINF_SUCCESS)
10415 {
10416 *pu16Value = *pu16Src;
10417 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10418
10419 /* Commit the new RSP value. */
10420 if (rc == VINF_SUCCESS)
10421 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10422 }
10423
10424 return rc;
10425}
10426
10427
10428/**
10429 * Pops a dword from the stack.
10430 *
10431 * @returns Strict VBox status code.
10432 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10433 * @param pu32Value Where to store the popped value.
10434 */
10435IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value)
10436{
10437 /* Increment the stack pointer. */
10438 uint64_t uNewRsp;
10439 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10440
10441 /* Read the dword the lazy way. */
10442 uint32_t const *pu32Src;
10443 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10444 if (rc == VINF_SUCCESS)
10445 {
10446 *pu32Value = *pu32Src;
10447 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10448
10449 /* Commit the new RSP value. */
10450 if (rc == VINF_SUCCESS)
10451 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10452 }
10453
10454 return rc;
10455}
10456
10457
10458/**
10459 * Pops a qword from the stack.
10460 *
10461 * @returns Strict VBox status code.
10462 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10463 * @param pu64Value Where to store the popped value.
10464 */
10465IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value)
10466{
10467 /* Increment the stack pointer. */
10468 uint64_t uNewRsp;
10469 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10470
10471 /* Read the qword the lazy way. */
10472 uint64_t const *pu64Src;
10473 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10474 if (rc == VINF_SUCCESS)
10475 {
10476 *pu64Value = *pu64Src;
10477 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10478
10479 /* Commit the new RSP value. */
10480 if (rc == VINF_SUCCESS)
10481 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10482 }
10483
10484 return rc;
10485}
10486
10487
10488/**
10489 * Pushes a word onto the stack, using a temporary stack pointer.
10490 *
10491 * @returns Strict VBox status code.
10492 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10493 * @param u16Value The value to push.
10494 * @param pTmpRsp Pointer to the temporary stack pointer.
10495 */
10496IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10497{
10498 /* Decrement the stack pointer. */
10499 RTUINT64U NewRsp = *pTmpRsp;
10500 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10501
10502 /* Write the word the lazy way. */
10503 uint16_t *pu16Dst;
10504 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10505 if (rc == VINF_SUCCESS)
10506 {
10507 *pu16Dst = u16Value;
10508 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10509 }
10510
10511 /* Commit the new RSP value unless an access handler made trouble. */
10512 if (rc == VINF_SUCCESS)
10513 *pTmpRsp = NewRsp;
10514
10515 return rc;
10516}
10517
10518
10519/**
10520 * Pushes a dword onto the stack, using a temporary stack pointer.
10521 *
10522 * @returns Strict VBox status code.
10523 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10524 * @param u32Value The value to push.
10525 * @param pTmpRsp Pointer to the temporary stack pointer.
10526 */
10527IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10528{
10529 /* Decrement the stack pointer. */
10530 RTUINT64U NewRsp = *pTmpRsp;
10531 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10532
10533 /* Write the dword the lazy way. */
10534 uint32_t *pu32Dst;
10535 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10536 if (rc == VINF_SUCCESS)
10537 {
10538 *pu32Dst = u32Value;
10539 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10540 }
10541
10542 /* Commit the new RSP value unless an access handler made trouble. */
10543 if (rc == VINF_SUCCESS)
10544 *pTmpRsp = NewRsp;
10545
10546 return rc;
10547}
10548
10549
10550/**
10551 * Pushes a qword onto the stack, using a temporary stack pointer.
10552 *
10553 * @returns Strict VBox status code.
10554 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10555 * @param u64Value The value to push.
10556 * @param pTmpRsp Pointer to the temporary stack pointer.
10557 */
10558IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10559{
10560 /* Decrement the stack pointer. */
10561 RTUINT64U NewRsp = *pTmpRsp;
10562 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10563
10564 /* Write the qword the lazy way. */
10565 uint64_t *pu64Dst;
10566 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10567 if (rc == VINF_SUCCESS)
10568 {
10569 *pu64Dst = u64Value;
10570 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10571 }
10572
10573 /* Commit the new RSP value unless an access handler made trouble. */
10574 if (rc == VINF_SUCCESS)
10575 *pTmpRsp = NewRsp;
10576
10577 return rc;
10578}
10579
10580
10581/**
10582 * Pops a word from the stack, using a temporary stack pointer.
10583 *
10584 * @returns Strict VBox status code.
10585 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10586 * @param pu16Value Where to store the popped value.
10587 * @param pTmpRsp Pointer to the temporary stack pointer.
10588 */
10589IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10590{
10591 /* Increment the stack pointer. */
10592 RTUINT64U NewRsp = *pTmpRsp;
10593 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10594
10595 /* Read the word the lazy way. */
10596 uint16_t const *pu16Src;
10597 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10598 if (rc == VINF_SUCCESS)
10599 {
10600 *pu16Value = *pu16Src;
10601 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10602
10603 /* Commit the new RSP value. */
10604 if (rc == VINF_SUCCESS)
10605 *pTmpRsp = NewRsp;
10606 }
10607
10608 return rc;
10609}
10610
10611
10612/**
10613 * Pops a dword from the stack, using a temporary stack pointer.
10614 *
10615 * @returns Strict VBox status code.
10616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10617 * @param pu32Value Where to store the popped value.
10618 * @param pTmpRsp Pointer to the temporary stack pointer.
10619 */
10620IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10621{
10622 /* Increment the stack pointer. */
10623 RTUINT64U NewRsp = *pTmpRsp;
10624 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10625
10626 /* Read the dword the lazy way. */
10627 uint32_t const *pu32Src;
10628 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10629 if (rc == VINF_SUCCESS)
10630 {
10631 *pu32Value = *pu32Src;
10632 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10633
10634 /* Commit the new RSP value. */
10635 if (rc == VINF_SUCCESS)
10636 *pTmpRsp = NewRsp;
10637 }
10638
10639 return rc;
10640}
10641
10642
10643/**
10644 * Pops a qword from the stack, using a temporary stack pointer.
10645 *
10646 * @returns Strict VBox status code.
10647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10648 * @param pu64Value Where to store the popped value.
10649 * @param pTmpRsp Pointer to the temporary stack pointer.
10650 */
10651IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10652{
10653 /* Increment the stack pointer. */
10654 RTUINT64U NewRsp = *pTmpRsp;
10655 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10656
10657 /* Read the qword the lazy way. */
10658 uint64_t const *pu64Src;
10659 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10660 if (rcStrict == VINF_SUCCESS)
10661 {
10662 *pu64Value = *pu64Src;
10663 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10664
10665 /* Commit the new RSP value. */
10666 if (rcStrict == VINF_SUCCESS)
10667 *pTmpRsp = NewRsp;
10668 }
10669
10670 return rcStrict;
10671}
10672
10673
10674/**
10675 * Begin a special stack push (used by interrupt, exceptions and such).
10676 *
10677 * This will raise \#SS or \#PF if appropriate.
10678 *
10679 * @returns Strict VBox status code.
10680 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10681 * @param cbMem The number of bytes to push onto the stack.
10682 * @param ppvMem Where to return the pointer to the stack memory.
10683 * As with the other memory functions this could be
10684 * direct access or bounce buffered access, so
10685 * don't commit register until the commit call
10686 * succeeds.
10687 * @param puNewRsp Where to return the new RSP value. This must be
10688 * passed unchanged to
10689 * iemMemStackPushCommitSpecial().
10690 */
10691IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10692{
10693 Assert(cbMem < UINT8_MAX);
10694 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10695 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10696}
10697
10698
10699/**
10700 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10701 *
10702 * This will update the rSP.
10703 *
10704 * @returns Strict VBox status code.
10705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10706 * @param pvMem The pointer returned by
10707 * iemMemStackPushBeginSpecial().
10708 * @param uNewRsp The new RSP value returned by
10709 * iemMemStackPushBeginSpecial().
10710 */
10711IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp)
10712{
10713 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10714 if (rcStrict == VINF_SUCCESS)
10715 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10716 return rcStrict;
10717}
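
/*
 * Assumed pairing of the push helpers above (illustrative sketch, not part of
 * the build); the qword size and uValueToPush are stand-ins:
 *
 *      uint64_t  uNewRsp;
 *      uint64_t *pu64Dst;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(*pu64Dst), (void **)&pu64Dst, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          *pu64Dst = uValueToPush;   // fill the mapped (possibly bounce buffered) stack bytes
 *          rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu64Dst, uNewRsp);   // commits RSP on success
 *      }
 */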
10718
10719
10720/**
10721 * Begin a special stack pop (used by iret, retf and such).
10722 *
10723 * This will raise \#SS or \#PF if appropriate.
10724 *
10725 * @returns Strict VBox status code.
10726 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10727 * @param cbMem The number of bytes to pop from the stack.
10728 * @param ppvMem Where to return the pointer to the stack memory.
10729 * @param puNewRsp Where to return the new RSP value. This must be
10730 * assigned to CPUMCTX::rsp manually some time
10731 * after iemMemStackPopDoneSpecial() has been
10732 * called.
10733 */
10734IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10735{
10736 Assert(cbMem < UINT8_MAX);
10737 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10738 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10739}
10740
10741
10742/**
10743 * Continue a special stack pop (used by iret and retf).
10744 *
10745 * This will raise \#SS or \#PF if appropriate.
10746 *
10747 * @returns Strict VBox status code.
10748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10749 * @param cbMem The number of bytes to pop from the stack.
10750 * @param ppvMem Where to return the pointer to the stack memory.
10751 * @param puNewRsp Where to return the new RSP value. This must be
10752 * assigned to CPUMCTX::rsp manually some time
10753 * after iemMemStackPopDoneSpecial() has been
10754 * called.
10755 */
10756IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10757{
10758 Assert(cbMem < UINT8_MAX);
10759 RTUINT64U NewRsp;
10760 NewRsp.u = *puNewRsp;
10761 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10762 *puNewRsp = NewRsp.u;
10763 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10764}
10765
10766
10767/**
10768 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10769 * iemMemStackPopContinueSpecial).
10770 *
10771 * The caller will manually commit the rSP.
10772 *
10773 * @returns Strict VBox status code.
10774 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10775 * @param pvMem The pointer returned by
10776 * iemMemStackPopBeginSpecial() or
10777 * iemMemStackPopContinueSpecial().
10778 */
10779IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem)
10780{
10781 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10782}
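
/*
 * Assumed usage of the special pop helpers above (illustrative sketch, not
 * part of the build): map, read, unmap, and only then commit RSP by hand as
 * the iemMemStackPopBeginSpecial() documentation requires.
 *
 *      uint64_t        uNewRsp;
 *      uint64_t const *pu64Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, (void const **)&pu64Frame, &uNewRsp);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint64_t const uPopped = *pu64Frame;              // consume the mapped stack bytes
 *          rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Frame);
 *          if (rcStrict == VINF_SUCCESS)
 *              pVCpu->cpum.GstCtx.rsp = uNewRsp;             // manual RSP commit, as documented
 *      }
 */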
10783
10784
10785/**
10786 * Fetches a system table byte.
10787 *
10788 * @returns Strict VBox status code.
10789 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10790 * @param pbDst Where to return the byte.
10791 * @param iSegReg The index of the segment register to use for
10792 * this access. The base and limits are checked.
10793 * @param GCPtrMem The address of the guest memory.
10794 */
10795IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10796{
10797 /* The lazy approach for now... */
10798 uint8_t const *pbSrc;
10799 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10800 if (rc == VINF_SUCCESS)
10801 {
10802 *pbDst = *pbSrc;
10803 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10804 }
10805 return rc;
10806}
10807
10808
10809/**
10810 * Fetches a system table word.
10811 *
10812 * @returns Strict VBox status code.
10813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10814 * @param pu16Dst Where to return the word.
10815 * @param iSegReg The index of the segment register to use for
10816 * this access. The base and limits are checked.
10817 * @param GCPtrMem The address of the guest memory.
10818 */
10819IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10820{
10821 /* The lazy approach for now... */
10822 uint16_t const *pu16Src;
10823 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10824 if (rc == VINF_SUCCESS)
10825 {
10826 *pu16Dst = *pu16Src;
10827 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10828 }
10829 return rc;
10830}
10831
10832
10833/**
10834 * Fetches a system table dword.
10835 *
10836 * @returns Strict VBox status code.
10837 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10838 * @param pu32Dst Where to return the dword.
10839 * @param iSegReg The index of the segment register to use for
10840 * this access. The base and limits are checked.
10841 * @param GCPtrMem The address of the guest memory.
10842 */
10843IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10844{
10845 /* The lazy approach for now... */
10846 uint32_t const *pu32Src;
10847 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10848 if (rc == VINF_SUCCESS)
10849 {
10850 *pu32Dst = *pu32Src;
10851 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10852 }
10853 return rc;
10854}
10855
10856
10857/**
10858 * Fetches a system table qword.
10859 *
10860 * @returns Strict VBox status code.
10861 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10862 * @param pu64Dst Where to return the qword.
10863 * @param iSegReg The index of the segment register to use for
10864 * this access. The base and limits are checked.
10865 * @param GCPtrMem The address of the guest memory.
10866 */
10867IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10868{
10869 /* The lazy approach for now... */
10870 uint64_t const *pu64Src;
10871 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10872 if (rc == VINF_SUCCESS)
10873 {
10874 *pu64Dst = *pu64Src;
10875 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10876 }
10877 return rc;
10878}
10879
10880
10881/**
10882 * Fetches a descriptor table entry with caller specified error code.
10883 *
10884 * @returns Strict VBox status code.
10885 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10886 * @param pDesc Where to return the descriptor table entry.
10887 * @param uSel The selector which table entry to fetch.
10888 * @param uXcpt The exception to raise on table lookup error.
10889 * @param uErrorCode The error code associated with the exception.
10890 */
10891IEM_STATIC VBOXSTRICTRC
10892iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10893{
10894 AssertPtr(pDesc);
10895 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10896
10897 /** @todo did the 286 require all 8 bytes to be accessible? */
10898 /*
10899 * Get the selector table base and check bounds.
10900 */
10901 RTGCPTR GCPtrBase;
10902 if (uSel & X86_SEL_LDT)
10903 {
10904 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10905 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10906 {
10907 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10908 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10909 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10910 uErrorCode, 0);
10911 }
10912
10913 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10914 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10915 }
10916 else
10917 {
10918 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10919 {
10920 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10921 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10922 uErrorCode, 0);
10923 }
10924 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10925 }
10926
10927 /*
10928 * Read the legacy descriptor and maybe the long mode extensions if
10929 * required.
10930 */
10931 VBOXSTRICTRC rcStrict;
10932 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10933 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10934 else
10935 {
10936 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10937 if (rcStrict == VINF_SUCCESS)
10938 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10939 if (rcStrict == VINF_SUCCESS)
10940 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10941 if (rcStrict == VINF_SUCCESS)
10942 pDesc->Legacy.au16[3] = 0;
10943 else
10944 return rcStrict;
10945 }
10946
10947 if (rcStrict == VINF_SUCCESS)
10948 {
10949 if ( !IEM_IS_LONG_MODE(pVCpu)
10950 || pDesc->Legacy.Gen.u1DescType)
10951 pDesc->Long.au64[1] = 0;
10952 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10953 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10954 else
10955 {
10956 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10957 /** @todo is this the right exception? */
10958 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10959 }
10960 }
10961 return rcStrict;
10962}
10963
10964
10965/**
10966 * Fetches a descriptor table entry.
10967 *
10968 * @returns Strict VBox status code.
10969 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10970 * @param pDesc Where to return the descriptor table entry.
10971 * @param uSel The selector which table entry to fetch.
10972 * @param uXcpt The exception to raise on table lookup error.
10973 */
10974IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10975{
10976 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10977}
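
/*
 * How a caller would typically combine the descriptor fetch with the
 * accessed-bit helper further down (illustrative sketch, not part of the
 * build; the protection checks a real caller performs are omitted):
 *
 *      IEMSELDESC   Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
 *      {
 *          rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
 *          if (rcStrict != VINF_SUCCESS)
 *              return rcStrict;
 *          Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;  // keep the local copy in sync
 *      }
 */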
10978
10979
10980/**
10981 * Fakes a long mode stack selector for SS = 0.
10982 *
10983 * @param pDescSs Where to return the fake stack descriptor.
10984 * @param uDpl The DPL we want.
10985 */
10986IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10987{
10988 pDescSs->Long.au64[0] = 0;
10989 pDescSs->Long.au64[1] = 0;
10990 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10991 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10992 pDescSs->Long.Gen.u2Dpl = uDpl;
10993 pDescSs->Long.Gen.u1Present = 1;
10994 pDescSs->Long.Gen.u1Long = 1;
10995}
10996
10997
10998/**
10999 * Marks the selector descriptor as accessed (only non-system descriptors).
11000 *
11001 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11002 * will therefore skip the limit checks.
11003 *
11004 * @returns Strict VBox status code.
11005 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11006 * @param uSel The selector.
11007 */
11008IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel)
11009{
11010 /*
11011 * Get the selector table base and calculate the entry address.
11012 */
11013 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11014 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11015 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11016 GCPtr += uSel & X86_SEL_MASK;
11017
11018 /*
11019 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11020 * ugly stuff to avoid this. This will make sure it's an atomic access
11021 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11022 */
11023 VBOXSTRICTRC rcStrict;
11024 uint32_t volatile *pu32;
11025 if ((GCPtr & 3) == 0)
11026 {
11027 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11028 GCPtr += 2 + 2;
11029 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11030 if (rcStrict != VINF_SUCCESS)
11031 return rcStrict;
11032 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11033 }
11034 else
11035 {
11036 /* The misaligned GDT/LDT case, map the whole thing. */
11037 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11038 if (rcStrict != VINF_SUCCESS)
11039 return rcStrict;
11040 switch ((uintptr_t)pu32 & 3)
11041 {
11042 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11043 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11044 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11045 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11046 }
11047 }
11048
11049 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11050}
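
/*
 * Worked example of the misaligned-case arithmetic above: the accessed bit is
 * bit 40 of the 8-byte descriptor, i.e. bit 0 of byte 5.  For a mapping whose
 * pointer is off by N bytes from a 4-byte boundary, the code steps the byte
 * pointer up by 4 - N bytes (back onto the alignment ASMAtomicBitSet requires)
 * and subtracts 8 * (4 - N) from the bit index, so every case still lands on
 * byte 5, bit 0:
 *
 *      case 1:  (pb + 3), bit 40 - 24 = 16  ->  byte 3 + 2 = byte 5, bit 0
 *      case 2:  (pb + 2), bit 40 - 16 = 24  ->  byte 2 + 3 = byte 5, bit 0
 *      case 3:  (pb + 1), bit 40 -  8 = 32  ->  byte 1 + 4 = byte 5, bit 0
 */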
11051
11052/** @} */
11053
11054
11055/*
11056 * Include the C/C++ implementation of the instructions.
11057 */
11058#include "IEMAllCImpl.cpp.h"
11059
11060
11061
11062/** @name "Microcode" macros.
11063 *
11064 * The idea is that we should be able to use the same code to interpret
11065 * instructions as well as to recompile them. Thus this obfuscation.
11066 *
11067 * @{
11068 */
11069#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11070#define IEM_MC_END() }
11071#define IEM_MC_PAUSE() do {} while (0)
11072#define IEM_MC_CONTINUE() do {} while (0)
11073
11074/** Internal macro. */
11075#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11076 do \
11077 { \
11078 VBOXSTRICTRC rcStrict2 = a_Expr; \
11079 if (rcStrict2 != VINF_SUCCESS) \
11080 return rcStrict2; \
11081 } while (0)
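
/*
 * Sketch of how these microcode macros are strung together by the instruction
 * decoder (illustrative only; the real bodies live in the instruction
 * templates).  The fixed register indices are assumptions for illustration:
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xCX, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */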
11082
11083
11084#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11085#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11086#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11087#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11088#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11089#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11090#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11091#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11092#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11093 do { \
11094 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11095 return iemRaiseDeviceNotAvailable(pVCpu); \
11096 } while (0)
11097#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11098 do { \
11099 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11100 return iemRaiseDeviceNotAvailable(pVCpu); \
11101 } while (0)
11102#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11103 do { \
11104 if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
11105 return iemRaiseMathFault(pVCpu); \
11106 } while (0)
11107#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11108 do { \
11109 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11110 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11111 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11112 return iemRaiseUndefinedOpcode(pVCpu); \
11113 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11114 return iemRaiseDeviceNotAvailable(pVCpu); \
11115 } while (0)
11116#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11117 do { \
11118 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11119 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11120 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11121 return iemRaiseUndefinedOpcode(pVCpu); \
11122 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11123 return iemRaiseDeviceNotAvailable(pVCpu); \
11124 } while (0)
11125#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11126 do { \
11127 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11128 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11129 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11130 return iemRaiseUndefinedOpcode(pVCpu); \
11131 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11132 return iemRaiseDeviceNotAvailable(pVCpu); \
11133 } while (0)
11134#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11135 do { \
11136 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11137 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11138 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11139 return iemRaiseUndefinedOpcode(pVCpu); \
11140 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11141 return iemRaiseDeviceNotAvailable(pVCpu); \
11142 } while (0)
11143#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11144 do { \
11145 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11146 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11147 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11148 return iemRaiseUndefinedOpcode(pVCpu); \
11149 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11150 return iemRaiseDeviceNotAvailable(pVCpu); \
11151 } while (0)
11152#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11153 do { \
11154 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11155 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11156 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11157 return iemRaiseUndefinedOpcode(pVCpu); \
11158 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11159 return iemRaiseDeviceNotAvailable(pVCpu); \
11160 } while (0)
11161#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11162 do { \
11163 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11164 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11165 return iemRaiseUndefinedOpcode(pVCpu); \
11166 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11167 return iemRaiseDeviceNotAvailable(pVCpu); \
11168 } while (0)
11169#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11170 do { \
11171 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11172 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11173 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11174 return iemRaiseUndefinedOpcode(pVCpu); \
11175 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11176 return iemRaiseDeviceNotAvailable(pVCpu); \
11177 } while (0)
11178#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11179 do { \
11180 if (pVCpu->iem.s.uCpl != 0) \
11181 return iemRaiseGeneralProtectionFault0(pVCpu); \
11182 } while (0)
11183#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11184 do { \
11185 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11186 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11187 } while (0)
11188#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11189 do { \
11190 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11191 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11192 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11193 return iemRaiseUndefinedOpcode(pVCpu); \
11194 } while (0)
11195#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11196 do { \
11197 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11198 return iemRaiseGeneralProtectionFault0(pVCpu); \
11199 } while (0)
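
/*
 * The raise-check macros above are meant to come first in a microcode block,
 * before any guest state is modified.  An assumed shape for an SSE2
 * register-to-register move (illustrative only; the state actualizing a real
 * body also performs is omitted):
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT();
 *      IEM_MC_LOCAL(RTUINT128U, uSrc);
 *      IEM_MC_FETCH_XREG_U128(uSrc, 1);   // XMM1, fixed index for illustration
 *      IEM_MC_STORE_XREG_U128(0, uSrc);   // XMM0
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 */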
11200
11201
11202#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11203#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11204#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11205#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11206#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11207#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11208#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11209 uint32_t a_Name; \
11210 uint32_t *a_pName = &a_Name
11211#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11212 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11213
11214#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11215#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11216
11217#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11218#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11219#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11220#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11221#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11222#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11223#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11224#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11225#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11226#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11227#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11228#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11229#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11230#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11231#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11232#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11233#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11234#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11235 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11236 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11237 } while (0)
11238#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11239 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11240 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11241 } while (0)
11242#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11243 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11244 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11245 } while (0)
11246/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11247#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11248 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11249 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11250 } while (0)
11251#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11252 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11253 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11254 } while (0)
11255/** @note Not for IOPL or IF testing or modification. */
11256#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11257#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11258#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
11259#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW
11260
11261#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11262#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11263#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11264#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11265#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11266#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11267#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11268#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11269#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11270#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11271/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11272#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11273 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11274 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11275 } while (0)
11276#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11277 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11278 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11279 } while (0)
11280#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11281 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
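
/*
 * Note on the 32-bit general register macros above: storing a 32-bit value
 * writes the full 64-bit register with the value zero extended, mirroring
 * AMD64 behaviour.  A small worked example (values are made up):
 *
 *      // rax = 0xffffffffffffffff beforehand
 *      IEM_MC_STORE_GREG_U32(X86_GREG_xAX, UINT32_C(0x12345678));
 *      // rax is now 0x0000000012345678, not 0xffffffff12345678
 */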
11282
11283
11284#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11285#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11286/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11287 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11288#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11289#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11290/** @note Not for IOPL or IF testing or modification. */
11291#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11292
11293#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11294#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11295#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11296 do { \
11297 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11298 *pu32Reg += (a_u32Value); \
11299 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
11300 } while (0)
11301#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11302
11303#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11304#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11305#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11306 do { \
11307 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11308 *pu32Reg -= (a_u32Value); \
11309 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
11310 } while (0)
11311#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11312#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11313
11314#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11315#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11316#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11317#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11318#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11319#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11320#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11321
11322#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11323#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11324#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11325#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11326
11327#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11328#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11329#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11330
11331#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11332#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11333#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11334
11335#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11336#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11337#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11338
11339#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11340#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11341#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11342
11343#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11344
11345#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11346
11347#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11348#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11349#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11350 do { \
11351 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11352 *pu32Reg &= (a_u32Value); \
11353 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
11354 } while (0)
11355#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11356
11357#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11358#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11359#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11360 do { \
11361 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11362 *pu32Reg |= (a_u32Value); \
11363 pu32Reg[1] = 0; /* implicitly clear the high half. */ \
11364 } while (0)
11365#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11366
11367
11368/** @note Not for IOPL or IF modification. */
11369#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11370/** @note Not for IOPL or IF modification. */
11371#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11372/** @note Not for IOPL or IF modification. */
11373#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11374
11375#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11376
11377/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0) if necessary. */
11378#define IEM_MC_FPU_TO_MMX_MODE() do { \
11379 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
11380 pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
11381 } while (0)
11382
11383/** Switches the FPU state from MMX mode (FTW=0xffff). */
11384#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11385 pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
11386 } while (0)
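
/*
 * Assumed usage of the MMX mode helpers above (illustrative): an MMX
 * instruction body pairs its register store with the TO_MMX switch, while
 * EMMS-style code uses the FROM variant to mark all registers empty again.
 *
 *      IEM_MC_STORE_MREG_U64(0, u64Result);   // MM0; u64Result is a stand-in
 *      IEM_MC_FPU_TO_MMX_MODE();              // TOS=0, all tags marked valid
 */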
11387
11388#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11389 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
11390#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11391 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11392#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11393 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11394 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11395 } while (0)
11396#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11397 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11398 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11399 } while (0)
11400#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11401 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11402#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11403 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11404#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11405 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11406
11407#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11408 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
11409 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
11410 } while (0)
11411#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11412 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11413#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11414 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11415#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11416 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11417#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11418 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11419 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11420 } while (0)
11421#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11422 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11423#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11424 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11425 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
11426 } while (0)
11427#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11428 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11429#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11430 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11431 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
11432 } while (0)
11433#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11434 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11435#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11436 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
11437#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11438 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
11439#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11440 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
11441#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11442 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
11443 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
11444 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
11445 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
11446 } while (0)
11447
11448#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11449 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11450 (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
11451 } while (0)
11452#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11453 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11454 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11455 } while (0)
11456#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11457 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11458 (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11459 (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11460 } while (0)
11461#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11462 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11463 (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11464 (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11465 (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11466 (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11467 } while (0)
11468
11469#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11470#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11471 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11472 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11473 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11474 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11475 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11476 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11477 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11478 } while (0)
11479#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11480 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11481 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11482 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11483 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11484 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11485 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11486 } while (0)
11487#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11488 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11489 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11490 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11491 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11492 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11493 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11494 } while (0)
11495#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11496 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11497 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11498 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11499 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11500 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11501 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11502 } while (0)
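
/*
 * The *_ZX_VLMAX stores above model the VEX behaviour of zeroing the
 * destination register above the written part, all the way up to the maximum
 * vector length.  Rough sketch (fixed register index assumed):
 *
 *      IEM_MC_STORE_YREG_U64_ZX_VLMAX(2, u64Src);   // YMM2[63:0] = u64Src, bits 255:64 cleared
 */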
11503
11504#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11505 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
11506#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11507 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
11508#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11509 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].au64[0])
11510#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11511 do { uintptr_t const iYRegTmp = (a_iYReg); \
11512 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11513 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11514 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
11515 } while (0)
11516
11517#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11518 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11519 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11520 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11521 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11522 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11523 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11524 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11525 } while (0)
11526#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11527 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11528 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11529 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11530 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11531 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11532 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11533 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11534 } while (0)
11535#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11536 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11537 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11538 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11539 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11540 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11541 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11542 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11543 } while (0)
11544
11545#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11546 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11547 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11548 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11549 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11550 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11551 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11552 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11553 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11554 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11555 } while (0)
11556#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11557 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11558 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11559 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11560 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11561 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11562 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11563 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11564 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11565 } while (0)
11566#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11567 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11568 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11569 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11570 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11571 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11572 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11573 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11574 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11575 } while (0)
11576#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11577 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11578 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11579 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11580 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11581 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11582 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11583 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11584 } while (0)
11585
11586#ifndef IEM_WITH_SETJMP
11587# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11588 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11589# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11590 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11591# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11592 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11593#else
11594# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11595 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11596# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11597 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11598# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11599 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11600#endif
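
/*
 * The IEM_WITH_SETJMP split above (and in the groups that follow) keeps the
 * instruction bodies identical under both build modes: the status-code
 * flavour expands to a checked call that returns on failure, while the setjmp
 * flavour longjmps out of the block instead.  The same line therefore works
 * either way, e.g. (segment and address purely for illustration):
 *
 *      IEM_MC_LOCAL(uint8_t, u8Value);
 *      IEM_MC_FETCH_MEM_U8(u8Value, X86_SREG_DS, GCPtrEffSrc);
 */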
11601
11602#ifndef IEM_WITH_SETJMP
11603# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11604 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11605# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11606 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11607# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11608 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11609#else
11610# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11611 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11612# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11613 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11614# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11615 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11616#endif
11617
11618#ifndef IEM_WITH_SETJMP
11619# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11620 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11621# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11622 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11623# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11624 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11625#else
11626# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11627 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11628# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11629 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11630# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11631 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11632#endif
11633
11634#ifdef SOME_UNUSED_FUNCTION
11635# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11636 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11637#endif
11638
11639#ifndef IEM_WITH_SETJMP
11640# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11641 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11642# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11643 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11644# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11645 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11646# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11647 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11648#else
11649# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11650 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11651# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11652 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11653# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11654 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11655# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11656 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11657#endif
11658
11659#ifndef IEM_WITH_SETJMP
11660# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11661 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11662# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11663 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11664# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11665 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11666#else
11667# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11668 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11669# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11670 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11671# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11672 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11673#endif
11674
11675#ifndef IEM_WITH_SETJMP
11676# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11677 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11678# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11679 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11680#else
11681# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11682 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11683# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11684 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11685#endif
11686
11687#ifndef IEM_WITH_SETJMP
11688# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11689 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11690# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11691 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11692#else
11693# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11694 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11695# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11696 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11697#endif
11698
11699
11700
11701#ifndef IEM_WITH_SETJMP
11702# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11703 do { \
11704 uint8_t u8Tmp; \
11705 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11706 (a_u16Dst) = u8Tmp; \
11707 } while (0)
11708# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11709 do { \
11710 uint8_t u8Tmp; \
11711 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11712 (a_u32Dst) = u8Tmp; \
11713 } while (0)
11714# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11715 do { \
11716 uint8_t u8Tmp; \
11717 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11718 (a_u64Dst) = u8Tmp; \
11719 } while (0)
11720# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11721 do { \
11722 uint16_t u16Tmp; \
11723 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11724 (a_u32Dst) = u16Tmp; \
11725 } while (0)
11726# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11727 do { \
11728 uint16_t u16Tmp; \
11729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11730 (a_u64Dst) = u16Tmp; \
11731 } while (0)
11732# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11733 do { \
11734 uint32_t u32Tmp; \
11735 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11736 (a_u64Dst) = u32Tmp; \
11737 } while (0)
11738#else /* IEM_WITH_SETJMP */
11739# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11740 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11741# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11742 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11743# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11744 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11745# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11746 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11747# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11748 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11749# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11750 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11751#endif /* IEM_WITH_SETJMP */
11752
11753#ifndef IEM_WITH_SETJMP
11754# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11755 do { \
11756 uint8_t u8Tmp; \
11757 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11758 (a_u16Dst) = (int8_t)u8Tmp; \
11759 } while (0)
11760# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11761 do { \
11762 uint8_t u8Tmp; \
11763 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11764 (a_u32Dst) = (int8_t)u8Tmp; \
11765 } while (0)
11766# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11767 do { \
11768 uint8_t u8Tmp; \
11769 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11770 (a_u64Dst) = (int8_t)u8Tmp; \
11771 } while (0)
11772# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11773 do { \
11774 uint16_t u16Tmp; \
11775 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11776 (a_u32Dst) = (int16_t)u16Tmp; \
11777 } while (0)
11778# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11779 do { \
11780 uint16_t u16Tmp; \
11781 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11782 (a_u64Dst) = (int16_t)u16Tmp; \
11783 } while (0)
11784# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11785 do { \
11786 uint32_t u32Tmp; \
11787 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11788 (a_u64Dst) = (int32_t)u32Tmp; \
11789 } while (0)
11790#else /* IEM_WITH_SETJMP */
11791# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11792 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11793# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11794 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11795# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11796 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11797# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11798 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11799# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11800 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11801# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11802 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11803#endif /* IEM_WITH_SETJMP */
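
/*
 * A minimal standalone sketch of the widening semantics used by the ZX/SX
 * fetch macros above: zero extension is a plain unsigned assignment, while
 * sign extension routes the value through the matching signed type first,
 * just like the (int8_t)/(int16_t)/(int32_t) casts in the non-setjmp
 * variants.  The names below are illustrative only and not part of IEM.
 */
#if 0 /* standalone illustration, not built */
# include <stdint.h>
# include <assert.h>

static uint64_t exampleZeroExtendU8(uint8_t u8)    { return u8; }                              /* 0x80 -> 0x0000000000000080 */
static uint64_t exampleSignExtendU8(uint8_t u8)    { return (uint64_t)(int64_t)(int8_t)u8; }   /* 0x80 -> 0xffffffffffffff80 */
static uint64_t exampleSignExtendU32(uint32_t u32) { return (uint64_t)(int64_t)(int32_t)u32; }

static void exampleWideningChecks(void)
{
    assert(exampleZeroExtendU8(0x80)        == UINT64_C(0x0000000000000080));
    assert(exampleSignExtendU8(0x80)        == UINT64_C(0xffffffffffffff80));
    assert(exampleSignExtendU32(0x80000000) == UINT64_C(0xffffffff80000000));
}
#endif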
11804
11805#ifndef IEM_WITH_SETJMP
11806# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11807 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11808# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11809 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11810# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11811 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11812# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11813 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11814#else
11815# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11816 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11817# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11818 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11819# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11820 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11821# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11822 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11823#endif
11824
11825#ifndef IEM_WITH_SETJMP
11826# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11827 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11828# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11829 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11830# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11831 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11832# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11833 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11834#else
11835# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11836 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11837# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11838 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11839# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11840 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11841# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11842 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11843#endif
11844
11845#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11846#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11847#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11848#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11849#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11850#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11851#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11852 do { \
11853 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11854 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11855 } while (0)
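
/*
 * A small standalone sketch of the "negative QNaN" bit patterns stored by the
 * IEM_MC_STORE_MEM_NEG_QNAN_* macros above: sign bit set, all exponent bits
 * set and the most significant mantissa bit set (plus the explicit integer
 * bit in the 80-bit format).  Purely illustrative; names are not part of IEM.
 */
#if 0 /* standalone illustration, not built */
# include <stdint.h>
# include <string.h>
# include <stdio.h>
# include <math.h>

static void examplePrintNegQNanPatterns(void)
{
    float    r32; uint32_t u32 = UINT32_C(0xffc00000);         /* sign=1, exp=0xff,  fraction MSB=1 */
    double   r64; uint64_t u64 = UINT64_C(0xfff8000000000000); /* sign=1, exp=0x7ff, fraction MSB=1 */
    memcpy(&r32, &u32, sizeof(r32));
    memcpy(&r64, &u64, sizeof(r64));
    printf("r32 isnan=%d r64 isnan=%d\n", !!isnan(r32), !!isnan(r64)); /* prints 1 1 */
    /* 80-bit: au16[4]=0xffff (sign + exponent), au64[0]=0xc000000000000000 (integer bit + QNaN bit). */
}
#endif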
11856
11857#ifndef IEM_WITH_SETJMP
11858# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11859 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11860# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11861 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11862#else
11863# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11864 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11865# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11866 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11867#endif
11868
11869#ifndef IEM_WITH_SETJMP
11870# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11871 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11872# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11873 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11874#else
11875# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11876 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11877# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11878 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11879#endif
11880
11881
11882#define IEM_MC_PUSH_U16(a_u16Value) \
11883 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11884#define IEM_MC_PUSH_U32(a_u32Value) \
11885 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11886#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11887 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11888#define IEM_MC_PUSH_U64(a_u64Value) \
11889 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11890
11891#define IEM_MC_POP_U16(a_pu16Value) \
11892 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11893#define IEM_MC_POP_U32(a_pu32Value) \
11894 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11895#define IEM_MC_POP_U64(a_pu64Value) \
11896 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
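
/*
 * A tiny standalone sketch of the push/pop ordering the stack helpers above
 * implement: a push first decrements the stack pointer by the operand size
 * and then stores, a pop reads first and then increments.  Flat little-endian
 * memory is assumed; the names are illustrative and not part of IEM.
 */
#if 0 /* standalone illustration, not built */
# include <stdint.h>
# include <string.h>

typedef struct EXAMPLESTACK { uint8_t abMem[64]; uint64_t uSp; } EXAMPLESTACK; /* uSp starts at sizeof(abMem) */

static void examplePushU16(EXAMPLESTACK *pStack, uint16_t uValue)
{
    pStack->uSp -= sizeof(uValue);                                /* rSP -= 2 */
    memcpy(&pStack->abMem[pStack->uSp], &uValue, sizeof(uValue)); /* store at the new rSP */
}

static uint16_t examplePopU16(EXAMPLESTACK *pStack)
{
    uint16_t uValue;
    memcpy(&uValue, &pStack->abMem[pStack->uSp], sizeof(uValue)); /* read at the current rSP */
    pStack->uSp += sizeof(uValue);                                /* rSP += 2 */
    return uValue;
}
#endif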
11897
11898/** Maps guest memory for direct or bounce buffered access.
11899 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11900 * @remarks May return.
11901 */
11902#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11903 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11904
11905/** Maps guest memory for direct or bounce buffered access.
11906 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11907 * @remarks May return.
11908 */
11909#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11910 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11911
11912/** Commits the memory and unmaps the guest memory.
11913 * @remarks May return.
11914 */
11915#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11916 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11917
11918/** Commits the memory and unmaps the guest memory unless the FPU status word
11919 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11920 * would prevent the FPU store.
11921 *
11922 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11923 * store, while \#P will not.
11924 *
11925 * @remarks May in theory return - for now.
11926 */
11927#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11928 do { \
11929 if ( !(a_u16FSW & X86_FSW_ES) \
11930 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11931 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
11932 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11933 } while (0)
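
/*
 * A standalone sketch of the commit condition used just above: the store goes
 * through unless the summary ES bit is set and at least one of the \#U, \#O or
 * \#I exceptions flagged in the FSW is unmasked in the FCW.  The FCW mask bits
 * share bit positions with the FSW exception bits, which is what makes the
 * single AND/AND-NOT expression work.  The constants mirror the x87 layout;
 * the function name is illustrative and not part of IEM.
 */
#if 0 /* standalone illustration, not built */
# include <stdint.h>
# include <stdbool.h>

# define EXAMPLE_FSW_IE        0x0001  /* invalid operation */
# define EXAMPLE_FSW_OE        0x0008  /* overflow */
# define EXAMPLE_FSW_UE        0x0010  /* underflow */
# define EXAMPLE_FSW_ES        0x0080  /* exception summary */
# define EXAMPLE_FCW_MASK_ALL  0x003f

static bool exampleShouldCommitFpuStore(uint16_t uFsw, uint16_t uFcw)
{
    uint16_t const fRaised   = uFsw & (EXAMPLE_FSW_UE | EXAMPLE_FSW_OE | EXAMPLE_FSW_IE);
    uint16_t const fUnmasked = fRaised & ~(uFcw & EXAMPLE_FCW_MASK_ALL);
    return !(uFsw & EXAMPLE_FSW_ES) || !fUnmasked;
}
#endif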
11934
11935/** Calculates the effective address from the ModR/M byte. */
11936#ifndef IEM_WITH_SETJMP
11937# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11938 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11939#else
11940# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11941 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11942#endif
11943
11944#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11945#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11946#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11947#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11948#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11949#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11950#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11951
11952/**
11953 * Defers the rest of the instruction emulation to a C implementation routine
11954 * and returns, only taking the standard parameters.
11955 *
11956 * @param a_pfnCImpl The pointer to the C routine.
11957 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11958 */
11959#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11960
11961/**
11962 * Defers the rest of instruction emulation to a C implementation routine and
11963 * returns, taking one argument in addition to the standard ones.
11964 *
11965 * @param a_pfnCImpl The pointer to the C routine.
11966 * @param a0 The argument.
11967 */
11968#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11969
11970/**
11971 * Defers the rest of the instruction emulation to a C implementation routine
11972 * and returns, taking two arguments in addition to the standard ones.
11973 *
11974 * @param a_pfnCImpl The pointer to the C routine.
11975 * @param a0 The first extra argument.
11976 * @param a1 The second extra argument.
11977 */
11978#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11979
11980/**
11981 * Defers the rest of the instruction emulation to a C implementation routine
11982 * and returns, taking three arguments in addition to the standard ones.
11983 *
11984 * @param a_pfnCImpl The pointer to the C routine.
11985 * @param a0 The first extra argument.
11986 * @param a1 The second extra argument.
11987 * @param a2 The third extra argument.
11988 */
11989#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11990
11991/**
11992 * Defers the rest of the instruction emulation to a C implementation routine
11993 * and returns, taking four arguments in addition to the standard ones.
11994 *
11995 * @param a_pfnCImpl The pointer to the C routine.
11996 * @param a0 The first extra argument.
11997 * @param a1 The second extra argument.
11998 * @param a2 The third extra argument.
11999 * @param a3 The fourth extra argument.
12000 */
12001#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12002
12003/**
12004 * Defers the rest of the instruction emulation to a C implementation routine
12005 * and returns, taking five arguments in addition to the standard ones.
12006 *
12007 * @param a_pfnCImpl The pointer to the C routine.
12008 * @param a0 The first extra argument.
12009 * @param a1 The second extra argument.
12010 * @param a2 The third extra argument.
12011 * @param a3 The fourth extra argument.
12012 * @param a4 The fifth extra argument.
12013 */
12014#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12015
12016/**
12017 * Defers the entire instruction emulation to a C implementation routine and
12018 * returns, only taking the standard parameters.
12019 *
12020 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12021 *
12022 * @param a_pfnCImpl The pointer to the C routine.
12023 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12024 */
12025#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12026
12027/**
12028 * Defers the entire instruction emulation to a C implementation routine and
12029 * returns, taking one argument in addition to the standard ones.
12030 *
12031 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12032 *
12033 * @param a_pfnCImpl The pointer to the C routine.
12034 * @param a0 The argument.
12035 */
12036#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12037
12038/**
12039 * Defers the entire instruction emulation to a C implementation routine and
12040 * returns, taking two arguments in addition to the standard ones.
12041 *
12042 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12043 *
12044 * @param a_pfnCImpl The pointer to the C routine.
12045 * @param a0 The first extra argument.
12046 * @param a1 The second extra argument.
12047 */
12048#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12049
12050/**
12051 * Defers the entire instruction emulation to a C implementation routine and
12052 * returns, taking three arguments in addition to the standard ones.
12053 *
12054 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12055 *
12056 * @param a_pfnCImpl The pointer to the C routine.
12057 * @param a0 The first extra argument.
12058 * @param a1 The second extra argument.
12059 * @param a2 The third extra argument.
12060 */
12061#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
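
/*
 * A minimal standalone sketch of the deferral pattern behind the
 * IEM_MC_CALL_CIMPL_* / IEM_MC_DEFER_TO_CIMPL_* macros: the decoder tail
 * forwards the standard parameters (CPU context and instruction length) plus
 * any extra operands to a C worker and returns its status code.  The types
 * and names below are simplified stand-ins, not the real IEM ones.
 */
#if 0 /* standalone illustration, not built */
# include <stdint.h>

typedef struct EXAMPLECPU { uint64_t rip; } EXAMPLECPU;
typedef int (*PFNEXAMPLECIMPL1)(EXAMPLECPU *pCpu, uint8_t cbInstr, uint64_t uArg0);

/* Roughly what a decoder tail does when it uses a one-argument CIMPL call: */
static int exampleDecodeAndDefer(EXAMPLECPU *pCpu, uint8_t cbInstr, PFNEXAMPLECIMPL1 pfnWorker, uint64_t uArg0)
{
    return pfnWorker(pCpu, cbInstr, uArg0); /* the worker advances RIP by cbInstr on success */
}
#endif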
12062
12063/**
12064 * Calls a FPU assembly implementation taking one visible argument.
12065 *
12066 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12067 * @param a0 The first extra argument.
12068 */
12069#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12070 do { \
12071 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
12072 } while (0)
12073
12074/**
12075 * Calls a FPU assembly implementation taking two visible arguments.
12076 *
12077 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12078 * @param a0 The first extra argument.
12079 * @param a1 The second extra argument.
12080 */
12081#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12082 do { \
12083 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12084 } while (0)
12085
12086/**
12087 * Calls a FPU assembly implementation taking three visible arguments.
12088 *
12089 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12090 * @param a0 The first extra argument.
12091 * @param a1 The second extra argument.
12092 * @param a2 The third extra argument.
12093 */
12094#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12095 do { \
12096 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12097 } while (0)
12098
12099#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12100 do { \
12101 (a_FpuData).FSW = (a_FSW); \
12102 (a_FpuData).r80Result = *(a_pr80Value); \
12103 } while (0)
12104
12105/** Pushes FPU result onto the stack. */
12106#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12107 iemFpuPushResult(pVCpu, &a_FpuData)
12108/** Pushes FPU result onto the stack and sets the FPUDP. */
12109#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12110 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12111
12112/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12113#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12114 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12115
12116/** Stores FPU result in a stack register. */
12117#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12118 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12119/** Stores FPU result in a stack register and pops the stack. */
12120#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12121 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12122/** Stores FPU result in a stack register and sets the FPUDP. */
12123#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12124 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12125/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12126 * stack. */
12127#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12128 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12129
12130/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12131#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12132 iemFpuUpdateOpcodeAndIp(pVCpu)
12133/** Free a stack register (for FFREE and FFREEP). */
12134#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12135 iemFpuStackFree(pVCpu, a_iStReg)
12136/** Increment the FPU stack pointer. */
12137#define IEM_MC_FPU_STACK_INC_TOP() \
12138 iemFpuStackIncTop(pVCpu)
12139/** Decrement the FPU stack pointer. */
12140#define IEM_MC_FPU_STACK_DEC_TOP() \
12141 iemFpuStackDecTop(pVCpu)
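
/*
 * A standalone sketch of the x87 TOP arithmetic behind the stack push/pop and
 * INC/DEC_TOP helpers above: TOP lives in FSW bits 11..13, a push decrements
 * it modulo 8, a pop increments it modulo 8, and STn is the physical register
 * (TOP + n) & 7.  Names are illustrative and not part of IEM.
 */
#if 0 /* standalone illustration, not built */
# include <stdint.h>

# define EXAMPLE_FSW_TOP_SHIFT 11
# define EXAMPLE_FSW_TOP_SMASK 0x7

static unsigned exampleGetTop(uint16_t uFsw)
{
    return (uFsw >> EXAMPLE_FSW_TOP_SHIFT) & EXAMPLE_FSW_TOP_SMASK;
}

static uint16_t exampleSetTop(uint16_t uFsw, unsigned iTop)
{
    uFsw &= ~(uint16_t)(EXAMPLE_FSW_TOP_SMASK << EXAMPLE_FSW_TOP_SHIFT);
    return uFsw | (uint16_t)((iTop & EXAMPLE_FSW_TOP_SMASK) << EXAMPLE_FSW_TOP_SHIFT);
}

static uint16_t exampleFswAfterPush(uint16_t uFsw) { return exampleSetTop(uFsw, (exampleGetTop(uFsw) - 1) & 7); }
static uint16_t exampleFswAfterPop(uint16_t uFsw)  { return exampleSetTop(uFsw, (exampleGetTop(uFsw) + 1) & 7); }
#endif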
12142
12143/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12144#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12145 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12146/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12147#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12148 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12149/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12150#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12151 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12152/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12153#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12154 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12155/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12156 * stack. */
12157#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12158 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12159/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12160#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12161 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12162
12163/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12164#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12165 iemFpuStackUnderflow(pVCpu, a_iStDst)
12166/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12167 * stack. */
12168#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12169 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12170/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12171 * FPUDS. */
12172#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12173 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12174/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12175 * FPUDS. Pops stack. */
12176#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12177 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12178/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12179 * stack twice. */
12180#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12181 iemFpuStackUnderflowThenPopPop(pVCpu)
12182/** Raises a FPU stack underflow exception for an instruction pushing a result
12183 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12184#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12185 iemFpuStackPushUnderflow(pVCpu)
12186/** Raises a FPU stack underflow exception for an instruction pushing a result
12187 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12188#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12189 iemFpuStackPushUnderflowTwo(pVCpu)
12190
12191/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12192 * FPUIP, FPUCS and FOP. */
12193#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12194 iemFpuStackPushOverflow(pVCpu)
12195/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12196 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12197#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12198 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12199/** Prepares for using the FPU state.
12200 * Ensures that we can use the host FPU in the current context (RC+R0).
12201 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12202#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12203/** Actualizes the guest FPU state so it can be accessed in a read-only fashion. */
12204#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12205/** Actualizes the guest FPU state so it can be accessed and modified. */
12206#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12207
12208/** Prepares for using the SSE state.
12209 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12210 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12211#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12212/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12213#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12214/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12215#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12216
12217/** Prepares for using the AVX state.
12218 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12219 * Ensures the guest AVX state in the CPUMCTX is up to date.
12220 * @note This will include the AVX-512 state too when support for it is added,
12221 * due to the zero-extending behaviour of VEX-encoded instructions. */
12222#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12223/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12224#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12225/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12226#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12227
12228/**
12229 * Calls an MMX assembly implementation taking two visible arguments.
12230 *
12231 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12232 * @param a0 The first extra argument.
12233 * @param a1 The second extra argument.
12234 */
12235#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12236 do { \
12237 IEM_MC_PREPARE_FPU_USAGE(); \
12238 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12239 } while (0)
12240
12241/**
12242 * Calls an MMX assembly implementation taking three visible arguments.
12243 *
12244 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12245 * @param a0 The first extra argument.
12246 * @param a1 The second extra argument.
12247 * @param a2 The third extra argument.
12248 */
12249#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12250 do { \
12251 IEM_MC_PREPARE_FPU_USAGE(); \
12252 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12253 } while (0)
12254
12255
12256/**
12257 * Calls an SSE assembly implementation taking two visible arguments.
12258 *
12259 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12260 * @param a0 The first extra argument.
12261 * @param a1 The second extra argument.
12262 */
12263#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12264 do { \
12265 IEM_MC_PREPARE_SSE_USAGE(); \
12266 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12267 } while (0)
12268
12269/**
12270 * Calls an SSE assembly implementation taking three visible arguments.
12271 *
12272 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12273 * @param a0 The first extra argument.
12274 * @param a1 The second extra argument.
12275 * @param a2 The third extra argument.
12276 */
12277#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12278 do { \
12279 IEM_MC_PREPARE_SSE_USAGE(); \
12280 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12281 } while (0)
12282
12283
12284/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12285 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12286#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12287 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, &pVCpu->cpum.GstCtx.XState, 0)
12288
12289/**
12290 * Calls an AVX assembly implementation taking two visible arguments.
12291 *
12292 * There is one implicit zeroth argument, a pointer to the extended state.
12293 *
12294 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12295 * @param a1 The first extra argument.
12296 * @param a2 The second extra argument.
12297 */
12298#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12299 do { \
12300 IEM_MC_PREPARE_AVX_USAGE(); \
12301 a_pfnAImpl(pXState, (a1), (a2)); \
12302 } while (0)
12303
12304/**
12305 * Calls an AVX assembly implementation taking three visible arguments.
12306 *
12307 * There is one implicit zeroth argument, a pointer to the extended state.
12308 *
12309 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12310 * @param a1 The first extra argument.
12311 * @param a2 The second extra argument.
12312 * @param a3 The third extra argument.
12313 */
12314#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12315 do { \
12316 IEM_MC_PREPARE_AVX_USAGE(); \
12317 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12318 } while (0)
12319
12320/** @note Not for IOPL or IF testing. */
12321#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12322/** @note Not for IOPL or IF testing. */
12323#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12324/** @note Not for IOPL or IF testing. */
12325#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12326/** @note Not for IOPL or IF testing. */
12327#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12328/** @note Not for IOPL or IF testing. */
12329#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12330 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12331 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12332/** @note Not for IOPL or IF testing. */
12333#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12334 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12335 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12336/** @note Not for IOPL or IF testing. */
12337#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12338 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12339 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12340 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12341/** @note Not for IOPL or IF testing. */
12342#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12343 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12344 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12345 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12346#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12347#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12348#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12349/** @note Not for IOPL or IF testing. */
12350#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12351 if ( pVCpu->cpum.GstCtx.cx != 0 \
12352 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12353/** @note Not for IOPL or IF testing. */
12354#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12355 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12356 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12357/** @note Not for IOPL or IF testing. */
12358#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12359 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12360 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12361/** @note Not for IOPL or IF testing. */
12362#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12363 if ( pVCpu->cpum.GstCtx.cx != 0 \
12364 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12365/** @note Not for IOPL or IF testing. */
12366#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12367 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12368 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12369/** @note Not for IOPL or IF testing. */
12370#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12371 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12372 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12373#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12374#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
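
/*
 * A standalone sketch of the flag tests expressed by the IEM_MC_IF_EFL_*
 * macros above.  The double negation normalises each flag to 0/1 so that
 * differently positioned bits can be compared directly, e.g. the SF != OF
 * test used by the "less" Jcc conditions.  The bit values match the x86
 * EFLAGS layout; the function name is illustrative and not part of IEM.
 */
#if 0 /* standalone illustration, not built */
# include <stdint.h>
# include <stdbool.h>

# define EXAMPLE_EFL_SF 0x0080  /* sign flag, bit 7 */
# define EXAMPLE_EFL_OF 0x0800  /* overflow flag, bit 11 */

static bool exampleIsLess(uint32_t fEfl) /* condition for JL / SETL: SF != OF */
{
    return !!(fEfl & EXAMPLE_EFL_SF) != !!(fEfl & EXAMPLE_EFL_OF);
}
#endif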
12375
12376#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12377 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12378#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12379 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12380#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12381 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12382#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12383 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12384#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12385 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12386#define IEM_MC_IF_FCW_IM() \
12387 if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
12388
12389#define IEM_MC_ELSE() } else {
12390#define IEM_MC_ENDIF() } do {} while (0)
12391
12392/** @} */
12393
12394
12395/** @name Opcode Debug Helpers.
12396 * @{
12397 */
12398#ifdef VBOX_WITH_STATISTICS
12399# ifdef IN_RING3
12400# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsR3.a_Stats += 1; } while (0)
12401# else
12402# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsRZ.a_Stats += 1; } while (0)
12403# endif
12404#else
12405# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12406#endif
12407
12408#ifdef DEBUG
12409# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12410 do { \
12411 IEMOP_INC_STATS(a_Stats); \
12412 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12413 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12414 } while (0)
12415
12416# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12417 do { \
12418 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12419 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12420 (void)RT_CONCAT(OP_,a_Upper); \
12421 (void)(a_fDisHints); \
12422 (void)(a_fIemHints); \
12423 } while (0)
12424
12425# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12426 do { \
12427 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12428 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12429 (void)RT_CONCAT(OP_,a_Upper); \
12430 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12431 (void)(a_fDisHints); \
12432 (void)(a_fIemHints); \
12433 } while (0)
12434
12435# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12436 do { \
12437 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12438 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12439 (void)RT_CONCAT(OP_,a_Upper); \
12440 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12441 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12442 (void)(a_fDisHints); \
12443 (void)(a_fIemHints); \
12444 } while (0)
12445
12446# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12447 do { \
12448 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12449 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12450 (void)RT_CONCAT(OP_,a_Upper); \
12451 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12452 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12453 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12454 (void)(a_fDisHints); \
12455 (void)(a_fIemHints); \
12456 } while (0)
12457
12458# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12459 do { \
12460 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12461 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12462 (void)RT_CONCAT(OP_,a_Upper); \
12463 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12464 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12465 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12466 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12467 (void)(a_fDisHints); \
12468 (void)(a_fIemHints); \
12469 } while (0)
12470
12471#else
12472# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12473
12474# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12475 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12476# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12477 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12478# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12479 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12480# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12481 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12482# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12483 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12484
12485#endif
12486
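/*
 * A standalone sketch of the compile-time checking trick used by the
 * IEMOP_MNEMONIC*EX macros above: pasting the form/operand names onto a known
 * prefix and evaluating the result as (void) forces the corresponding constant
 * to exist, so a typo in a mnemonic annotation fails to build while generating
 * no code.  Names are illustrative and not part of IEM.
 */
#if 0 /* standalone illustration, not built */
# define EXAMPLE_CONCAT(a, b)        a##b
# define EXAMPLE_FORM_RM             1
# define EXAMPLE_CHECK_FORM(a_Form)  ((void)EXAMPLE_CONCAT(EXAMPLE_FORM_, a_Form))

static void exampleUse(void)
{
    EXAMPLE_CHECK_FORM(RM);      /* compiles: EXAMPLE_FORM_RM exists */
    /* EXAMPLE_CHECK_FORM(RN);      would not compile: EXAMPLE_FORM_RN is undefined */
}
#endif
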
12487#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12488 IEMOP_MNEMONIC0EX(a_Lower, \
12489 #a_Lower, \
12490 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12491#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12492 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12493 #a_Lower " " #a_Op1, \
12494 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12495#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12496 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12497 #a_Lower " " #a_Op1 "," #a_Op2, \
12498 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12499#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12500 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12501 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12502 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12503#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12504 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12505 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12506 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
12507
12508/** @} */
12509
12510
12511/** @name Opcode Helpers.
12512 * @{
12513 */
12514
12515#ifdef IN_RING3
12516# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12517 do { \
12518 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12519 else \
12520 { \
12521 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12522 return IEMOP_RAISE_INVALID_OPCODE(); \
12523 } \
12524 } while (0)
12525#else
12526# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12527 do { \
12528 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12529 else return IEMOP_RAISE_INVALID_OPCODE(); \
12530 } while (0)
12531#endif
12532
12533/** The instruction requires a 186 or later. */
12534#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12535# define IEMOP_HLP_MIN_186() do { } while (0)
12536#else
12537# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12538#endif
12539
12540/** The instruction requires a 286 or later. */
12541#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12542# define IEMOP_HLP_MIN_286() do { } while (0)
12543#else
12544# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12545#endif
12546
12547/** The instruction requires a 386 or later. */
12548#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12549# define IEMOP_HLP_MIN_386() do { } while (0)
12550#else
12551# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12552#endif
12553
12554/** The instruction requires a 386 or later if the given expression is true. */
12555#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12556# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12557#else
12558# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12559#endif
12560
12561/** The instruction requires a 486 or later. */
12562#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12563# define IEMOP_HLP_MIN_486() do { } while (0)
12564#else
12565# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12566#endif
12567
12568/** The instruction requires a Pentium (586) or later. */
12569#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12570# define IEMOP_HLP_MIN_586() do { } while (0)
12571#else
12572# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12573#endif
12574
12575/** The instruction requires a PentiumPro (686) or later. */
12576#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12577# define IEMOP_HLP_MIN_686() do { } while (0)
12578#else
12579# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12580#endif
12581
12582
12583/** The instruction raises an \#UD in real and V8086 mode. */
12584#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12585 do \
12586 { \
12587 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12588 else return IEMOP_RAISE_INVALID_OPCODE(); \
12589 } while (0)
12590
12591#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12592/** This instruction raises an \#UD in real and V8086 mode, or when in long mode
12593 * without using a 64-bit code segment (applicable to all VMX instructions
12594 * except VMCALL).
12595 */
12596#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12597 do \
12598 { \
12599 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12600 && ( !IEM_IS_LONG_MODE(pVCpu) \
12601 || IEM_IS_64BIT_CODE(pVCpu))) \
12602 { /* likely */ } \
12603 else \
12604 { \
12605 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12606 { \
12607 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12608 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12609 return IEMOP_RAISE_INVALID_OPCODE(); \
12610 } \
12611 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12612 { \
12613 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12614 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12615 return IEMOP_RAISE_INVALID_OPCODE(); \
12616 } \
12617 } \
12618 } while (0)
12619
12620/** The instruction can only be executed in VMX operation (VMX root mode and
12621 * non-root mode).
12622 *
12623 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12624 */
12625# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12626 do \
12627 { \
12628 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12629 else \
12630 { \
12631 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12632 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12633 return IEMOP_RAISE_INVALID_OPCODE(); \
12634 } \
12635 } while (0)
12636#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12637
12638/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12639 * 64-bit mode. */
12640#define IEMOP_HLP_NO_64BIT() \
12641 do \
12642 { \
12643 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12644 return IEMOP_RAISE_INVALID_OPCODE(); \
12645 } while (0)
12646
12647/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12648 * 64-bit mode. */
12649#define IEMOP_HLP_ONLY_64BIT() \
12650 do \
12651 { \
12652 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12653 return IEMOP_RAISE_INVALID_OPCODE(); \
12654 } while (0)
12655
12656/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12657#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12658 do \
12659 { \
12660 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12661 iemRecalEffOpSize64Default(pVCpu); \
12662 } while (0)
12663
12664/** The instruction has 64-bit operand size if 64-bit mode. */
12665#define IEMOP_HLP_64BIT_OP_SIZE() \
12666 do \
12667 { \
12668 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12669 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12670 } while (0)
12671
12672/** Only a REX prefix immediately preceding the first opcode byte takes
12673 * effect. This macro helps ensure this and logs bad guest code. */
12674#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12675 do \
12676 { \
12677 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12678 { \
12679 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12680 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12681 pVCpu->iem.s.uRexB = 0; \
12682 pVCpu->iem.s.uRexIndex = 0; \
12683 pVCpu->iem.s.uRexReg = 0; \
12684 iemRecalEffOpSize(pVCpu); \
12685 } \
12686 } while (0)
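
/*
 * A standalone sketch of the REX prefix fields that the macro above discards
 * when the prefix does not immediately precede the opcode: REX is 0x40..0x4f,
 * with W (64-bit operand size), R (ModRM.reg extension), X (SIB.index
 * extension) and B (ModRM.rm / SIB.base extension) in bits 3..0.  Names are
 * illustrative and not part of IEM.
 */
#if 0 /* standalone illustration, not built */
# include <stdint.h>

typedef struct EXAMPLEREX { uint8_t fW, fR, fX, fB; } EXAMPLEREX;

static EXAMPLEREX exampleDecodeRex(uint8_t bRex) /* caller ensures 0x40 <= bRex <= 0x4f */
{
    EXAMPLEREX Rex;
    Rex.fW = (bRex >> 3) & 1; /* 64-bit operand size */
    Rex.fR = (bRex >> 2) & 1; /* extends ModRM.reg */
    Rex.fX = (bRex >> 1) & 1; /* extends SIB.index */
    Rex.fB =  bRex       & 1; /* extends ModRM.rm / SIB.base */
    return Rex;
}
#endif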
12687
12688/**
12689 * Done decoding.
12690 */
12691#define IEMOP_HLP_DONE_DECODING() \
12692 do \
12693 { \
12694 /*nothing for now, maybe later... */ \
12695 } while (0)
12696
12697/**
12698 * Done decoding, raise \#UD exception if lock prefix present.
12699 */
12700#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12701 do \
12702 { \
12703 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12704 { /* likely */ } \
12705 else \
12706 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12707 } while (0)
12708
12709
12710/**
12711 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12712 * repnz or size prefixes are present, or if in real or v8086 mode.
12713 */
12714#define IEMOP_HLP_DONE_VEX_DECODING() \
12715 do \
12716 { \
12717 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12718 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12719 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12720 { /* likely */ } \
12721 else \
12722 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12723 } while (0)
12724
12725/**
12726 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12727 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12728 */
12729#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12730 do \
12731 { \
12732 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12733 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12734 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12735 && pVCpu->iem.s.uVexLength == 0)) \
12736 { /* likely */ } \
12737 else \
12738 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12739 } while (0)
12740
12741
12742/**
12743 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12744 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12745 * register 0, or if in real or v8086 mode.
12746 */
12747#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12748 do \
12749 { \
12750 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12751 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12752 && !pVCpu->iem.s.uVex3rdReg \
12753 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12754 { /* likely */ } \
12755 else \
12756 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12757 } while (0)
12758
12759/**
12760 * Done decoding VEX, no V, L=0.
12761 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12762 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12763 */
12764#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12765 do \
12766 { \
12767 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12768 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12769 && pVCpu->iem.s.uVexLength == 0 \
12770 && pVCpu->iem.s.uVex3rdReg == 0 \
12771 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12772 { /* likely */ } \
12773 else \
12774 return IEMOP_RAISE_INVALID_OPCODE(); \
12775 } while (0)
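
/*
 * A standalone sketch of the VEX fields the decoding checks above look at.
 * In the 3-byte VEX prefix (C4 xx yy) the vvvv register field is stored
 * inverted, so an encoded value of 0xf means "no register" (decoded register
 * 0), which is why the checks compare the decoded third-register value
 * against zero; L selects 256-bit operation when set.  The structure and
 * names are illustrative and not IEM's own.
 */
#if 0 /* standalone illustration, not built */
# include <stdint.h>

typedef struct EXAMPLEVEX3 { uint8_t iReg3rd, uLength, iPp, iMmmmm, fW, fR, fX, fB; } EXAMPLEVEX3;

static EXAMPLEVEX3 exampleDecodeVex3(uint8_t bByte1, uint8_t bByte2) /* the two bytes following 0xc4 */
{
    EXAMPLEVEX3 Vex;
    Vex.fR      = !((bByte1 >> 7) & 1);       /* R, X and B are stored inverted */
    Vex.fX      = !((bByte1 >> 6) & 1);
    Vex.fB      = !((bByte1 >> 5) & 1);
    Vex.iMmmmm  = bByte1 & 0x1f;              /* opcode map select */
    Vex.fW      = (bByte2 >> 7) & 1;
    Vex.iReg3rd = ((bByte2 >> 3) & 0xf) ^ 0xf; /* vvvv is inverted: encoded 0xf -> register 0 */
    Vex.uLength = (bByte2 >> 2) & 1;          /* L: 0 = 128-bit, 1 = 256-bit */
    Vex.iPp     = bByte2 & 0x3;               /* implied 66/F3/F2 prefix */
    return Vex;
}
#endif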
12776
12777#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12778 do \
12779 { \
12780 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12781 { /* likely */ } \
12782 else \
12783 { \
12784 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12785 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12786 } \
12787 } while (0)
12788#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12789 do \
12790 { \
12791 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12792 { /* likely */ } \
12793 else \
12794 { \
12795 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12796 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12797 } \
12798 } while (0)
12799
12800/**
12801 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12802 * are present.
12803 */
12804#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12805 do \
12806 { \
12807 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12808 { /* likely */ } \
12809 else \
12810 return IEMOP_RAISE_INVALID_OPCODE(); \
12811 } while (0)
12812
12813/**
12814 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12815 * prefixes are present.
12816 */
12817#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12818 do \
12819 { \
12820 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12821 { /* likely */ } \
12822 else \
12823 return IEMOP_RAISE_INVALID_OPCODE(); \
12824 } while (0)
12825
12826
12827/**
12828 * Calculates the effective address of a ModR/M memory operand.
12829 *
12830 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12831 *
12832 * @return Strict VBox status code.
12833 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12834 * @param bRm The ModRM byte.
12835 * @param cbImm The size of any immediate following the
12836 * effective address opcode bytes. Important for
12837 * RIP relative addressing.
12838 * @param pGCPtrEff Where to return the effective address.
12839 */
12840IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12841{
12842 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12843# define SET_SS_DEF() \
12844 do \
12845 { \
12846 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12847 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12848 } while (0)
12849
12850 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12851 {
12852/** @todo Check the effective address size crap! */
12853 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12854 {
12855 uint16_t u16EffAddr;
12856
12857 /* Handle the disp16 form with no registers first. */
12858 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12859 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12860 else
12861 {
12862 /* Get the displacement. */
12863 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12864 {
12865 case 0: u16EffAddr = 0; break;
12866 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12867 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12868 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12869 }
12870
12871 /* Add the base and index registers to the disp. */
12872 switch (bRm & X86_MODRM_RM_MASK)
12873 {
12874 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12875 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12876 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12877 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12878 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12879 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12880 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12881 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12882 }
12883 }
12884
12885 *pGCPtrEff = u16EffAddr;
12886 }
12887 else
12888 {
12889 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12890 uint32_t u32EffAddr;
12891
12892 /* Handle the disp32 form with no registers first. */
12893 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12894 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12895 else
12896 {
12897 /* Get the register (or SIB) value. */
12898 switch ((bRm & X86_MODRM_RM_MASK))
12899 {
12900 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12901 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12902 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12903 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12904 case 4: /* SIB */
12905 {
12906 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12907
12908 /* Get the index and scale it. */
12909 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12910 {
12911 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12912 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12913 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12914 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12915 case 4: u32EffAddr = 0; /*none */ break;
12916 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12917 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12918 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12919 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12920 }
12921 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12922
12923 /* add base */
12924 switch (bSib & X86_SIB_BASE_MASK)
12925 {
12926 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12927 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12928 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12929 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12930 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12931 case 5:
12932 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12933 {
12934 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12935 SET_SS_DEF();
12936 }
12937 else
12938 {
12939 uint32_t u32Disp;
12940 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12941 u32EffAddr += u32Disp;
12942 }
12943 break;
12944 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12945 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12946 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12947 }
12948 break;
12949 }
12950 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12951 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12952 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12953 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12954 }
12955
12956 /* Get and add the displacement. */
12957 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12958 {
12959 case 0:
12960 break;
12961 case 1:
12962 {
12963 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12964 u32EffAddr += i8Disp;
12965 break;
12966 }
12967 case 2:
12968 {
12969 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12970 u32EffAddr += u32Disp;
12971 break;
12972 }
12973 default:
12974 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12975 }
12976
12977 }
12978 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12979 *pGCPtrEff = u32EffAddr;
12980 else
12981 {
12982 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12983 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12984 }
12985 }
12986 }
12987 else
12988 {
12989 uint64_t u64EffAddr;
12990
12991 /* Handle the rip+disp32 form with no registers first. */
12992 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12993 {
12994 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12995 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12996 }
12997 else
12998 {
12999 /* Get the register (or SIB) value. */
13000 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13001 {
13002 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13003 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13004 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13005 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13006 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13007 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13008 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13009 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13010 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13011 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13012 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13013 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13014 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13015 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13016 /* SIB */
13017 case 4:
13018 case 12:
13019 {
13020 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13021
13022 /* Get the index and scale it. */
13023 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13024 {
13025 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13026 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13027 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13028 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13029 case 4: u64EffAddr = 0; /*none */ break;
13030 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13031 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13032 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13033 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13034 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13035 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13036 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13037 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13038 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13039 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13040 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13041 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13042 }
13043 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13044
13045 /* add base */
13046 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13047 {
13048 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13049 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13050 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13051 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13052 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13053 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13054 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13055 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13056 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13057 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13058 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13059 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13060 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13061 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13062 /* complicated encodings */
13063 case 5:
13064 case 13:
13065 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13066 {
13067 if (!pVCpu->iem.s.uRexB)
13068 {
13069 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13070 SET_SS_DEF();
13071 }
13072 else
13073 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13074 }
13075 else
13076 {
13077 uint32_t u32Disp;
13078 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13079 u64EffAddr += (int32_t)u32Disp;
13080 }
13081 break;
13082 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13083 }
13084 break;
13085 }
13086 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13087 }
13088
13089 /* Get and add the displacement. */
13090 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13091 {
13092 case 0:
13093 break;
13094 case 1:
13095 {
13096 int8_t i8Disp;
13097 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13098 u64EffAddr += i8Disp;
13099 break;
13100 }
13101 case 2:
13102 {
13103 uint32_t u32Disp;
13104 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13105 u64EffAddr += (int32_t)u32Disp;
13106 break;
13107 }
13108 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13109 }
13110
13111 }
13112
13113 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13114 *pGCPtrEff = u64EffAddr;
13115 else
13116 {
13117 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13118 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13119 }
13120 }
13121
13122 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13123 return VINF_SUCCESS;
13124}
13125
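/*
 * A standalone sketch of the classic 16-bit ModR/M register table used by the
 * effective address calculation above: R/M values 0..7 select BX+SI, BX+DI,
 * BP+SI, BP+DI, SI, DI, BP and BX respectively, with the BP-based forms
 * defaulting to the SS segment.  The mod=0, rm=6 disp16-only form is handled
 * separately by the caller, as in the function above.  Types and names are
 * simplified stand-ins for illustration only.
 */
#if 0 /* standalone illustration, not built */
# include <stdint.h>

typedef struct EXAMPLEREGS16 { uint16_t bx, bp, si, di; } EXAMPLEREGS16;

static uint16_t exampleCalc16BitBaseIndex(EXAMPLEREGS16 const *pRegs, uint8_t bRm, int *pfDefaultSs)
{
    *pfDefaultSs = 0;
    switch (bRm & 7) /* X86_MODRM_RM_MASK */
    {
        case 0: return pRegs->bx + pRegs->si;
        case 1: return pRegs->bx + pRegs->di;
        case 2: *pfDefaultSs = 1; return pRegs->bp + pRegs->si;
        case 3: *pfDefaultSs = 1; return pRegs->bp + pRegs->di;
        case 4: return pRegs->si;
        case 5: return pRegs->di;
        case 6: *pfDefaultSs = 1; return pRegs->bp;
        default: return pRegs->bx;
    }
}
#endif
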
13126
13127/**
13128 * Calculates the effective address of a ModR/M memory operand.
13129 *
13130 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13131 *
13132 * @return Strict VBox status code.
13133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13134 * @param bRm The ModRM byte.
13135 * @param cbImm The size of any immediate following the
13136 * effective address opcode bytes. Important for
13137 * RIP relative addressing.
13138 * @param pGCPtrEff Where to return the effective address.
13139 * @param offRsp RSP displacement.
13140 */
13141IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13142{
13143 Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
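    /* If no segment-override prefix is active, rBP/rSP-based addressing defaults to the SS segment
       rather than DS; SET_SS_DEF switches the effective segment accordingly. */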
13144# define SET_SS_DEF() \
13145 do \
13146 { \
13147 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13148 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13149 } while (0)
13150
13151 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13152 {
13153/** @todo Check the effective address size crap! */
13154 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13155 {
13156 uint16_t u16EffAddr;
13157
13158 /* Handle the disp16 form with no registers first. */
13159 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13160 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13161 else
13162 {
13163 /* Get the displacement. */
13164 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13165 {
13166 case 0: u16EffAddr = 0; break;
13167 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13168 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13169 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13170 }
13171
13172 /* Add the base and index registers to the disp. */
13173 switch (bRm & X86_MODRM_RM_MASK)
13174 {
13175 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13176 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13177 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13178 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13179 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13180 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13181 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13182 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13183 }
13184 }
13185
13186 *pGCPtrEff = u16EffAddr;
13187 }
13188 else
13189 {
13190 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13191 uint32_t u32EffAddr;
13192
13193 /* Handle the disp32 form with no registers first. */
13194 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13195 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13196 else
13197 {
13198 /* Get the register (or SIB) value. */
13199 switch ((bRm & X86_MODRM_RM_MASK))
13200 {
13201 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13202 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13203 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13204 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13205 case 4: /* SIB */
13206 {
13207 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13208
13209 /* Get the index and scale it. */
13210 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13211 {
13212 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13213 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13214 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13215 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13216 case 4: u32EffAddr = 0; /*none */ break;
13217 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13218 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13219 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13220 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13221 }
13222 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13223
13224 /* add base */
13225 switch (bSib & X86_SIB_BASE_MASK)
13226 {
13227 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13228 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13229 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13230 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13231 case 4:
13232 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13233 SET_SS_DEF();
13234 break;
13235 case 5:
13236 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13237 {
13238 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13239 SET_SS_DEF();
13240 }
13241 else
13242 {
13243 uint32_t u32Disp;
13244 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13245 u32EffAddr += u32Disp;
13246 }
13247 break;
13248 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13249 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13250 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13251 }
13252 break;
13253 }
13254 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13255 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13256 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13257 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13258 }
13259
13260 /* Get and add the displacement. */
13261 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13262 {
13263 case 0:
13264 break;
13265 case 1:
13266 {
13267 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13268 u32EffAddr += i8Disp;
13269 break;
13270 }
13271 case 2:
13272 {
13273 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13274 u32EffAddr += u32Disp;
13275 break;
13276 }
13277 default:
13278 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13279 }
13280
13281 }
13282 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13283 *pGCPtrEff = u32EffAddr;
13284 else
13285 {
13286 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13287 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13288 }
13289 }
13290 }
13291 else
13292 {
13293 uint64_t u64EffAddr;
13294
13295 /* Handle the rip+disp32 form with no registers first. */
13296 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13297 {
13298 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
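            /* RIP-relative addressing is relative to the first byte of the *next* instruction,
               so the length added to RIP must also cover any immediate bytes that follow the
               displacement (cbImm). */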
13299 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13300 }
13301 else
13302 {
13303 /* Get the register (or SIB) value. */
13304 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13305 {
13306 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13307 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13308 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13309 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13310 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13311 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13312 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13313 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13314 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13315 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13316 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13317 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13318 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13319 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13320 /* SIB */
13321 case 4:
13322 case 12:
13323 {
13324 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13325
13326 /* Get the index and scale it. */
13327 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13328 {
13329 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13330 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13331 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13332 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13333 case 4: u64EffAddr = 0; /*none */ break;
13334 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13335 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13336 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13337 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13338 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13339 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13340 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13341 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13342 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13343 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13344 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13345 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13346 }
13347 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13348
13349 /* add base */
13350 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13351 {
13352 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13353 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13354 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13355 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13356 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13357 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13358 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13359 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13360 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13361 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13362 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13363 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13364 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13365 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13366 /* complicated encodings */
13367 case 5:
13368 case 13:
13369 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13370 {
13371 if (!pVCpu->iem.s.uRexB)
13372 {
13373 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13374 SET_SS_DEF();
13375 }
13376 else
13377 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13378 }
13379 else
13380 {
13381 uint32_t u32Disp;
13382 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13383 u64EffAddr += (int32_t)u32Disp;
13384 }
13385 break;
13386 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13387 }
13388 break;
13389 }
13390 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13391 }
13392
13393 /* Get and add the displacement. */
13394 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13395 {
13396 case 0:
13397 break;
13398 case 1:
13399 {
13400 int8_t i8Disp;
13401 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13402 u64EffAddr += i8Disp;
13403 break;
13404 }
13405 case 2:
13406 {
13407 uint32_t u32Disp;
13408 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13409 u64EffAddr += (int32_t)u32Disp;
13410 break;
13411 }
13412 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13413 }
13414
13415 }
13416
13417 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13418 *pGCPtrEff = u64EffAddr;
13419 else
13420 {
13421 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13422 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13423 }
13424 }
13425
13426 Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13427 return VINF_SUCCESS;
13428}
13429
13430
13431#ifdef IEM_WITH_SETJMP
13432/**
13433 * Calculates the effective address of a ModR/M memory operand.
13434 *
13435 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13436 *
13437 * May longjmp on internal error.
13438 *
13439 * @return The effective address.
13440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13441 * @param bRm The ModRM byte.
13442 * @param cbImm The size of any immediate following the
13443 * effective address opcode bytes. Important for
13444 * RIP relative addressing.
13445 */
13446IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm)
13447{
13448 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13449# define SET_SS_DEF() \
13450 do \
13451 { \
13452 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13453 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13454 } while (0)
13455
13456 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13457 {
13458/** @todo Check the effective address size crap! */
13459 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13460 {
13461 uint16_t u16EffAddr;
13462
13463 /* Handle the disp16 form with no registers first. */
13464 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13465 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13466 else
13467 {
13468 /* Get the displacement. */
13469 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13470 {
13471 case 0: u16EffAddr = 0; break;
13472 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13473 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13474 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13475 }
13476
13477 /* Add the base and index registers to the disp. */
13478 switch (bRm & X86_MODRM_RM_MASK)
13479 {
13480 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13481 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13482 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13483 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13484 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13485 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13486 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13487 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13488 }
13489 }
13490
13491 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13492 return u16EffAddr;
13493 }
13494
13495 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13496 uint32_t u32EffAddr;
13497
13498 /* Handle the disp32 form with no registers first. */
13499 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13500 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13501 else
13502 {
13503 /* Get the register (or SIB) value. */
13504 switch ((bRm & X86_MODRM_RM_MASK))
13505 {
13506 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13507 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13508 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13509 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13510 case 4: /* SIB */
13511 {
13512 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13513
13514 /* Get the index and scale it. */
13515 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13516 {
13517 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13518 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13519 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13520 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13521 case 4: u32EffAddr = 0; /*none */ break;
13522 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13523 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13524 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13525 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13526 }
13527 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13528
13529 /* add base */
13530 switch (bSib & X86_SIB_BASE_MASK)
13531 {
13532 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13533 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13534 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13535 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13536 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13537 case 5:
13538 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13539 {
13540 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13541 SET_SS_DEF();
13542 }
13543 else
13544 {
13545 uint32_t u32Disp;
13546 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13547 u32EffAddr += u32Disp;
13548 }
13549 break;
13550 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13551 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13552 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13553 }
13554 break;
13555 }
13556 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13557 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13558 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13559 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13560 }
13561
13562 /* Get and add the displacement. */
13563 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13564 {
13565 case 0:
13566 break;
13567 case 1:
13568 {
13569 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13570 u32EffAddr += i8Disp;
13571 break;
13572 }
13573 case 2:
13574 {
13575 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13576 u32EffAddr += u32Disp;
13577 break;
13578 }
13579 default:
13580 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13581 }
13582 }
13583
13584 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13585 {
13586 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13587 return u32EffAddr;
13588 }
13589 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13590 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13591 return u32EffAddr & UINT16_MAX;
13592 }
13593
13594 uint64_t u64EffAddr;
13595
13596 /* Handle the rip+disp32 form with no registers first. */
13597 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13598 {
13599 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13600 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13601 }
13602 else
13603 {
13604 /* Get the register (or SIB) value. */
13605 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13606 {
13607 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13608 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13609 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13610 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13611 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13612 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13613 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13614 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13615 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13616 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13617 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13618 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13619 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13620 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13621 /* SIB */
13622 case 4:
13623 case 12:
13624 {
13625 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13626
13627 /* Get the index and scale it. */
13628 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13629 {
13630 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13631 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13632 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13633 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13634 case 4: u64EffAddr = 0; /*none */ break;
13635 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13636 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13637 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13638 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13639 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13640 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13641 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13642 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13643 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13644 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13645 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13646 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13647 }
13648 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13649
13650 /* add base */
13651 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13652 {
13653 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13654 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13655 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13656 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13657 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13658 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13659 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13660 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13661 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13662 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13663 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13664 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13665 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13666 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13667 /* complicated encodings */
13668 case 5:
13669 case 13:
13670 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13671 {
13672 if (!pVCpu->iem.s.uRexB)
13673 {
13674 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13675 SET_SS_DEF();
13676 }
13677 else
13678 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13679 }
13680 else
13681 {
13682 uint32_t u32Disp;
13683 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13684 u64EffAddr += (int32_t)u32Disp;
13685 }
13686 break;
13687 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13688 }
13689 break;
13690 }
13691 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13692 }
13693
13694 /* Get and add the displacement. */
13695 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13696 {
13697 case 0:
13698 break;
13699 case 1:
13700 {
13701 int8_t i8Disp;
13702 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13703 u64EffAddr += i8Disp;
13704 break;
13705 }
13706 case 2:
13707 {
13708 uint32_t u32Disp;
13709 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13710 u64EffAddr += (int32_t)u32Disp;
13711 break;
13712 }
13713 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13714 }
13715
13716 }
13717
13718 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13719 {
13720 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13721 return u64EffAddr;
13722 }
13723 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13724 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13725 return u64EffAddr & UINT32_MAX;
13726}
13727#endif /* IEM_WITH_SETJMP */
13728
13729/** @} */
13730
13731
13732
13733/*
13734 * Include the instructions
13735 */
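/* This pulls in the opcode maps (e.g. g_apfnOneByteMap) and the per-instruction
   decode/emulation workers used by the executor functions below. */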
13736#include "IEMAllInstructions.cpp.h"
13737
13738
13739
13740#ifdef LOG_ENABLED
13741/**
13742 * Logs the current instruction.
13743 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13744 * @param fSameCtx Set if we have the same context information as the VMM,
13745 * clear if we may have already executed an instruction in
13746 * our debug context. When clear, we assume IEMCPU holds
13747 * valid CPU mode info.
13748 *
13749 * The @a fSameCtx parameter is now misleading and obsolete.
13750 * @param pszFunction The IEM function doing the execution.
13751 */
13752IEM_STATIC void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction)
13753{
13754# ifdef IN_RING3
13755 if (LogIs2Enabled())
13756 {
13757 char szInstr[256];
13758 uint32_t cbInstr = 0;
13759 if (fSameCtx)
13760 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13761 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13762 szInstr, sizeof(szInstr), &cbInstr);
13763 else
13764 {
13765 uint32_t fFlags = 0;
13766 switch (pVCpu->iem.s.enmCpuMode)
13767 {
13768 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13769 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13770 case IEMMODE_16BIT:
13771 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13772 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13773 else
13774 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13775 break;
13776 }
13777 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13778 szInstr, sizeof(szInstr), &cbInstr);
13779 }
13780
13781 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
13782 Log2(("**** %s\n"
13783 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13784 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13785 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13786 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13787 " %s\n"
13788 , pszFunction,
13789 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13790 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13791 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13792 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13793 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13794 szInstr));
13795
13796 if (LogIs3Enabled())
13797 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13798 }
13799 else
13800# endif
13801 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13802 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13803 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13804}
13805#endif /* LOG_ENABLED */
13806
13807
13808#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13809/**
13810 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
13811 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
13812 *
13813 * @returns Modified rcStrict.
13814 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13815 * @param rcStrict The instruction execution status.
13816 */
13817static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13818{
13819 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
13820 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
13821 {
13822 /* VMX preemption timer takes priority over NMI-window exits. */
13823 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
13824 {
13825 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
13826 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
13827 }
13828 /*
13829 * Check remaining intercepts.
13830 *
13831 * NMI-window and Interrupt-window VM-exits.
13832 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
13833 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
13834 *
13835 * See Intel spec. 26.7.6 "NMI-Window Exiting".
13836 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
13837 */
13838 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
13839 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13840 && !TRPMHasTrap(pVCpu))
13841 {
13842 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
13843 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
13844 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
13845 {
13846 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
13847 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
13848 }
13849 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
13850 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
13851 {
13852 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
13853 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
13854 }
13855 }
13856 }
13857 /* TPR-below threshold/APIC write has the highest priority. */
13858 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
13859 {
13860 rcStrict = iemVmxApicWriteEmulation(pVCpu);
13861 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13862 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
13863 }
13864 /* MTF takes priority over VMX-preemption timer. */
13865 else
13866 {
13867 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
13868 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13869 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
13870 }
13871 return rcStrict;
13872}
13873#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
13874
13875
13876/**
13877 * Makes status code adjustments (pass up from I/O and access handlers)
13878 * as well as maintaining statistics.
13879 *
13880 * @returns Strict VBox status code to pass up.
13881 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13882 * @param rcStrict The status from executing an instruction.
13883 */
13884DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13885{
13886 if (rcStrict != VINF_SUCCESS)
13887 {
13888 if (RT_SUCCESS(rcStrict))
13889 {
13890 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13891 || rcStrict == VINF_IOM_R3_IOPORT_READ
13892 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13893 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13894 || rcStrict == VINF_IOM_R3_MMIO_READ
13895 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13896 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13897 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13898 || rcStrict == VINF_CPUM_R3_MSR_READ
13899 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13900 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13901 || rcStrict == VINF_EM_RAW_TO_R3
13902 || rcStrict == VINF_EM_TRIPLE_FAULT
13903 || rcStrict == VINF_GIM_R3_HYPERCALL
13904 /* raw-mode / virt handlers only: */
13905 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13906 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13907 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13908 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13909 || rcStrict == VINF_SELM_SYNC_GDT
13910 || rcStrict == VINF_CSAM_PENDING_ACTION
13911 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13912 /* nested hw.virt codes: */
13913 || rcStrict == VINF_VMX_VMEXIT
13914 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
13915 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13916 || rcStrict == VINF_SVM_VMEXIT
13917 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13918/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
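            /* Merge in any pending pass-up status: it takes precedence when it lies outside the
               VINF_EM range or ranks higher (i.e. is numerically lower) than the current status. */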
13919 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13920#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13921 if ( rcStrict == VINF_VMX_VMEXIT
13922 && rcPassUp == VINF_SUCCESS)
13923 rcStrict = VINF_SUCCESS;
13924 else
13925#endif
13926#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13927 if ( rcStrict == VINF_SVM_VMEXIT
13928 && rcPassUp == VINF_SUCCESS)
13929 rcStrict = VINF_SUCCESS;
13930 else
13931#endif
13932 if (rcPassUp == VINF_SUCCESS)
13933 pVCpu->iem.s.cRetInfStatuses++;
13934 else if ( rcPassUp < VINF_EM_FIRST
13935 || rcPassUp > VINF_EM_LAST
13936 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13937 {
13938 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13939 pVCpu->iem.s.cRetPassUpStatus++;
13940 rcStrict = rcPassUp;
13941 }
13942 else
13943 {
13944 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13945 pVCpu->iem.s.cRetInfStatuses++;
13946 }
13947 }
13948 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13949 pVCpu->iem.s.cRetAspectNotImplemented++;
13950 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13951 pVCpu->iem.s.cRetInstrNotImplemented++;
13952 else
13953 pVCpu->iem.s.cRetErrStatuses++;
13954 }
13955 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13956 {
13957 pVCpu->iem.s.cRetPassUpStatus++;
13958 rcStrict = pVCpu->iem.s.rcPassUp;
13959 }
13960
13961 return rcStrict;
13962}
13963
13964
13965/**
13966 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13967 * IEMExecOneWithPrefetchedByPC.
13968 *
13969 * Similar code is found in IEMExecLots.
13970 *
13971 * @return Strict VBox status code.
13972 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13973 * @param fExecuteInhibit If set, execute the instruction following CLI,
13974 * POP SS and MOV SS,GR.
13975 * @param pszFunction The calling function name.
13976 */
13977DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
13978{
13979 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13980 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13981 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13982 RT_NOREF_PV(pszFunction);
13983
13984#ifdef IEM_WITH_SETJMP
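    /* In setjmp builds, opcode fetching and memory access helpers report failures by
       longjmp'ing back to the jump buffer installed here instead of returning a status code. */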
13985 VBOXSTRICTRC rcStrict;
13986 jmp_buf JmpBuf;
13987 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13988 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13989 if ((rcStrict = setjmp(JmpBuf)) == 0)
13990 {
13991 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13992 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13993 }
13994 else
13995 pVCpu->iem.s.cLongJumps++;
13996 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13997#else
13998 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13999 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14000#endif
14001 if (rcStrict == VINF_SUCCESS)
14002 pVCpu->iem.s.cInstructions++;
14003 if (pVCpu->iem.s.cActiveMappings > 0)
14004 {
14005 Assert(rcStrict != VINF_SUCCESS);
14006 iemMemRollback(pVCpu);
14007 }
14008 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14009 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14010 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14011
14012//#ifdef DEBUG
14013// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14014//#endif
14015
14016#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14017 /*
14018 * Perform any VMX nested-guest instruction boundary actions.
14019 *
14020 * If any of these causes a VM-exit, we must skip executing the next
14021 * instruction (would run into stale page tables). A VM-exit makes sure
14022 * there is no interrupt-inhibition, so that should ensure we don't go on
14023 * to execute the next instruction. Clearing fExecuteInhibit is
14024 * problematic because of the setjmp/longjmp clobbering above.
14025 */
14026 if ( rcStrict == VINF_SUCCESS
14027 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
14028 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
14029 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
14030#endif
14031
14032 /* Execute the next instruction as well if a cli, pop ss or
14033 mov ss, Gr has just completed successfully. */
14034 if ( fExecuteInhibit
14035 && rcStrict == VINF_SUCCESS
14036 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14037 && EMIsInhibitInterruptsActive(pVCpu))
14038 {
14039 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
14040 if (rcStrict == VINF_SUCCESS)
14041 {
14042#ifdef LOG_ENABLED
14043 iemLogCurInstr(pVCpu, false, pszFunction);
14044#endif
14045#ifdef IEM_WITH_SETJMP
14046 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14047 if ((rcStrict = setjmp(JmpBuf)) == 0)
14048 {
14049 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14050 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14051 }
14052 else
14053 pVCpu->iem.s.cLongJumps++;
14054 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14055#else
14056 IEM_OPCODE_GET_NEXT_U8(&b);
14057 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14058#endif
14059 if (rcStrict == VINF_SUCCESS)
14060 pVCpu->iem.s.cInstructions++;
14061 if (pVCpu->iem.s.cActiveMappings > 0)
14062 {
14063 Assert(rcStrict != VINF_SUCCESS);
14064 iemMemRollback(pVCpu);
14065 }
14066 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14067 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14068 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14069 }
14070 else if (pVCpu->iem.s.cActiveMappings > 0)
14071 iemMemRollback(pVCpu);
14072 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
14073 }
14074
14075 /*
14076 * Return value fiddling, statistics and sanity assertions.
14077 */
14078 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14079
14080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14081 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14082 return rcStrict;
14083}
14084
14085
14086/**
14087 * Execute one instruction.
14088 *
14089 * @return Strict VBox status code.
14090 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14091 */
14092VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
14093{
14094 AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
14095#ifdef LOG_ENABLED
14096 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14097#endif
14098
14099 /*
14100 * Do the decoding and emulation.
14101 */
14102 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14103 if (rcStrict == VINF_SUCCESS)
14104 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14105 else if (pVCpu->iem.s.cActiveMappings > 0)
14106 iemMemRollback(pVCpu);
14107
14108 if (rcStrict != VINF_SUCCESS)
14109 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14110 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14111 return rcStrict;
14112}
14113
14114
14115VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14116{
14117 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14118
14119 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14120 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14121 if (rcStrict == VINF_SUCCESS)
14122 {
14123 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14124 if (pcbWritten)
14125 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14126 }
14127 else if (pVCpu->iem.s.cActiveMappings > 0)
14128 iemMemRollback(pVCpu);
14129
14130 return rcStrict;
14131}
14132
14133
14134VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14135 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14136{
14137 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14138
14139 VBOXSTRICTRC rcStrict;
14140 if ( cbOpcodeBytes
14141 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14142 {
14143 iemInitDecoder(pVCpu, false, false);
14144#ifdef IEM_WITH_CODE_TLB
14145 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14146 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14147 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14148 pVCpu->iem.s.offCurInstrStart = 0;
14149 pVCpu->iem.s.offInstrNextByte = 0;
14150#else
14151 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14152 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14153#endif
14154 rcStrict = VINF_SUCCESS;
14155 }
14156 else
14157 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14158 if (rcStrict == VINF_SUCCESS)
14159 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14160 else if (pVCpu->iem.s.cActiveMappings > 0)
14161 iemMemRollback(pVCpu);
14162
14163 return rcStrict;
14164}
14165
14166
14167VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14168{
14169 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14170
14171 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14172 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14173 if (rcStrict == VINF_SUCCESS)
14174 {
14175 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14176 if (pcbWritten)
14177 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14178 }
14179 else if (pVCpu->iem.s.cActiveMappings > 0)
14180 iemMemRollback(pVCpu);
14181
14182 return rcStrict;
14183}
14184
14185
14186VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14187 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14188{
14189 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14190
14191 VBOXSTRICTRC rcStrict;
14192 if ( cbOpcodeBytes
14193 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14194 {
14195 iemInitDecoder(pVCpu, true, false);
14196#ifdef IEM_WITH_CODE_TLB
14197 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14198 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14199 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14200 pVCpu->iem.s.offCurInstrStart = 0;
14201 pVCpu->iem.s.offInstrNextByte = 0;
14202#else
14203 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14204 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14205#endif
14206 rcStrict = VINF_SUCCESS;
14207 }
14208 else
14209 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14210 if (rcStrict == VINF_SUCCESS)
14211 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14212 else if (pVCpu->iem.s.cActiveMappings > 0)
14213 iemMemRollback(pVCpu);
14214
14215 return rcStrict;
14216}
14217
14218
14219/**
14220 * For debugging DISGetParamSize, may come in handy.
14221 *
14222 * @returns Strict VBox status code.
14223 * @param pVCpu The cross context virtual CPU structure of the
14224 * calling EMT.
14225 * @param pCtxCore The context core structure.
14226 * @param OpcodeBytesPC The PC of the opcode bytes.
14227 * @param pvOpcodeBytes Prefetched opcode bytes.
14228 * @param cbOpcodeBytes Number of prefetched bytes.
14229 * @param pcbWritten Where to return the number of bytes written.
14230 * Optional.
14231 */
14232VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14233 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14234 uint32_t *pcbWritten)
14235{
14236 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14237
14238 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14239 VBOXSTRICTRC rcStrict;
14240 if ( cbOpcodeBytes
14241 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14242 {
14243 iemInitDecoder(pVCpu, true, false);
14244#ifdef IEM_WITH_CODE_TLB
14245 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14246 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14247 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14248 pVCpu->iem.s.offCurInstrStart = 0;
14249 pVCpu->iem.s.offInstrNextByte = 0;
14250#else
14251 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14252 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14253#endif
14254 rcStrict = VINF_SUCCESS;
14255 }
14256 else
14257 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14258 if (rcStrict == VINF_SUCCESS)
14259 {
14260 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14261 if (pcbWritten)
14262 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14263 }
14264 else if (pVCpu->iem.s.cActiveMappings > 0)
14265 iemMemRollback(pVCpu);
14266
14267 return rcStrict;
14268}
14269
14270
14271/**
14272 * For handling split cacheline lock operations when the host has split-lock
14273 * detection enabled.
14274 *
14275 * This will cause the interpreter to disregard the lock prefix and implicit
14276 * locking (xchg).
14277 *
14278 * @returns Strict VBox status code.
14279 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14280 */
14281VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
14282{
14283 /*
14284 * Do the decoding and emulation.
14285 */
14286 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
14287 if (rcStrict == VINF_SUCCESS)
14288 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
14289 else if (pVCpu->iem.s.cActiveMappings > 0)
14290 iemMemRollback(pVCpu);
14291
14292 if (rcStrict != VINF_SUCCESS)
14293 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14294 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14295 return rcStrict;
14296}
14297
14298
14299VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14300{
14301 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14302 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14303
14304 /*
14305 * See if there is an interrupt pending in TRPM, inject it if we can.
14306 */
14307 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
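    /* Determine whether a physical interrupt can be delivered right now: GIF must be set, and
       in SVM/VMX nested-guest mode the virtualization controls decide instead of plain EFLAGS.IF. */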
14308#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14309 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14310 if (fIntrEnabled)
14311 {
14312 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14313 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14314 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14315 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
14316 else
14317 {
14318 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14319 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14320 }
14321 }
14322#else
14323 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14324#endif
14325
14326 /** @todo What if we are injecting an exception and not an interrupt? Is that
14327 * possible here? For now we assert it is indeed only an interrupt. */
14328 if ( fIntrEnabled
14329 && TRPMHasTrap(pVCpu)
14330 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14331 {
14332 uint8_t u8TrapNo;
14333 TRPMEVENT enmType;
14334 uint32_t uErrCode;
14335 RTGCPTR uCr2;
14336 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
14337 AssertRC(rc2);
14338 Assert(enmType == TRPM_HARDWARE_INT);
14339 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14340 TRPMResetTrap(pVCpu);
14341#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14342 /* Injecting an event may cause a VM-exit. */
14343 if ( rcStrict != VINF_SUCCESS
14344 && rcStrict != VINF_IEM_RAISED_XCPT)
14345 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14346#else
14347 NOREF(rcStrict);
14348#endif
14349 }
14350
14351 /*
14352 * Initial decoder init w/ prefetch, then setup setjmp.
14353 */
14354 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14355 if (rcStrict == VINF_SUCCESS)
14356 {
14357#ifdef IEM_WITH_SETJMP
14358 jmp_buf JmpBuf;
14359 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14360 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14361 pVCpu->iem.s.cActiveMappings = 0;
14362 if ((rcStrict = setjmp(JmpBuf)) == 0)
14363#endif
14364 {
14365 /*
14366 * The run loop. We limit ourselves to 4096 instructions right now.
14367 */
14368 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14369 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
14370 for (;;)
14371 {
14372 /*
14373 * Log the state.
14374 */
14375#ifdef LOG_ENABLED
14376 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14377#endif
14378
14379 /*
14380 * Do the decoding and emulation.
14381 */
14382 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14383 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14384 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14385 {
14386 Assert(pVCpu->iem.s.cActiveMappings == 0);
14387 pVCpu->iem.s.cInstructions++;
14388 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14389 {
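                    /* Mask out the force-flags IEM handles itself or may safely ignore here; of what
                       remains, pending PIC/APIC interrupts only force us out when EFLAGS.IF is set. */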
14390 uint64_t fCpu = pVCpu->fLocalForcedActions
14391 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14392 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14393 | VMCPU_FF_TLB_FLUSH
14394 | VMCPU_FF_INHIBIT_INTERRUPTS
14395 | VMCPU_FF_BLOCK_NMIS
14396 | VMCPU_FF_UNHALT ));
14397
14398 if (RT_LIKELY( ( !fCpu
14399 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14400 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14401 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14402 {
14403 if (cMaxInstructionsGccStupidity-- > 0)
14404 {
14405 /* Poll timers every now and then according to the caller's specs. */
14406 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14407 || !TMTimerPollBool(pVM, pVCpu))
14408 {
14409 Assert(pVCpu->iem.s.cActiveMappings == 0);
14410 iemReInitDecoder(pVCpu);
14411 continue;
14412 }
14413 }
14414 }
14415 }
14416 Assert(pVCpu->iem.s.cActiveMappings == 0);
14417 }
14418 else if (pVCpu->iem.s.cActiveMappings > 0)
14419 iemMemRollback(pVCpu);
14420 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14421 break;
14422 }
14423 }
14424#ifdef IEM_WITH_SETJMP
14425 else
14426 {
14427 if (pVCpu->iem.s.cActiveMappings > 0)
14428 iemMemRollback(pVCpu);
14429# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14430 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14431# endif
14432 pVCpu->iem.s.cLongJumps++;
14433 }
14434 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14435#endif
14436
14437 /*
14438 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14439 */
14440 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14441 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14442 }
14443 else
14444 {
14445 if (pVCpu->iem.s.cActiveMappings > 0)
14446 iemMemRollback(pVCpu);
14447
14448#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14449 /*
14450 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14451 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14452 */
14453 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14454#endif
14455 }
14456
14457 /*
14458 * Maybe re-enter raw-mode and log.
14459 */
14460 if (rcStrict != VINF_SUCCESS)
14461 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14462 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14463 if (pcInstructions)
14464 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14465 return rcStrict;
14466}
14467
14468
14469/**
14470 * Interface used by EMExecuteExec, does exit statistics and limits.
14471 *
14472 * @returns Strict VBox status code.
14473 * @param pVCpu The cross context virtual CPU structure.
14474 * @param fWillExit To be defined.
14475 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14476 * @param cMaxInstructions Maximum number of instructions to execute.
14477 * @param cMaxInstructionsWithoutExits
14478 * The max number of instructions without exits.
14479 * @param pStats Where to return statistics.
14480 */
14481VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14482 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14483{
14484 NOREF(fWillExit); /** @todo define flexible exit crits */
14485
14486 /*
14487 * Initialize return stats.
14488 */
14489 pStats->cInstructions = 0;
14490 pStats->cExits = 0;
14491 pStats->cMaxExitDistance = 0;
14492 pStats->cReserved = 0;
14493
14494 /*
14495 * Initial decoder init w/ prefetch, then setup setjmp.
14496 */
14497 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14498 if (rcStrict == VINF_SUCCESS)
14499 {
14500#ifdef IEM_WITH_SETJMP
14501 jmp_buf JmpBuf;
14502 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14503 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14504 pVCpu->iem.s.cActiveMappings = 0;
14505 if ((rcStrict = setjmp(JmpBuf)) == 0)
14506#endif
14507 {
14508#ifdef IN_RING0
14509 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14510#endif
14511 uint32_t cInstructionSinceLastExit = 0;
14512
14513 /*
14514 * The run loop. We limit ourselves to 4096 instructions right now.
14515 */
14516 PVM pVM = pVCpu->CTX_SUFF(pVM);
14517 for (;;)
14518 {
14519 /*
14520 * Log the state.
14521 */
14522#ifdef LOG_ENABLED
14523 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14524#endif
14525
14526 /*
14527 * Do the decoding and emulation.
14528 */
14529 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14530
14531 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14532 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14533
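                /* An instruction counts as an exit when it bumped cPotentialExits (the very first
                   one is not counted); also track the longest instruction run between exits. */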
14534 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14535 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14536 {
14537 pStats->cExits += 1;
14538 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14539 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14540 cInstructionSinceLastExit = 0;
14541 }
14542
14543 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14544 {
14545 Assert(pVCpu->iem.s.cActiveMappings == 0);
14546 pVCpu->iem.s.cInstructions++;
14547 pStats->cInstructions++;
14548 cInstructionSinceLastExit++;
14549 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14550 {
14551 uint64_t fCpu = pVCpu->fLocalForcedActions
14552 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14553 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14554 | VMCPU_FF_TLB_FLUSH
14555 | VMCPU_FF_INHIBIT_INTERRUPTS
14556 | VMCPU_FF_BLOCK_NMIS
14557 | VMCPU_FF_UNHALT ));
14558
14559 if (RT_LIKELY( ( ( !fCpu
14560 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14561 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14562 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14563 || pStats->cInstructions < cMinInstructions))
14564 {
14565 if (pStats->cInstructions < cMaxInstructions)
14566 {
14567 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14568 {
14569#ifdef IN_RING0
14570 if ( !fCheckPreemptionPending
14571 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14572#endif
14573 {
14574 Assert(pVCpu->iem.s.cActiveMappings == 0);
14575 iemReInitDecoder(pVCpu);
14576 continue;
14577 }
14578#ifdef IN_RING0
14579 rcStrict = VINF_EM_RAW_INTERRUPT;
14580 break;
14581#endif
14582 }
14583 }
14584 }
14585 Assert(!(fCpu & VMCPU_FF_IEM));
14586 }
14587 Assert(pVCpu->iem.s.cActiveMappings == 0);
14588 }
14589 else if (pVCpu->iem.s.cActiveMappings > 0)
14590 iemMemRollback(pVCpu);
14591 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14592 break;
14593 }
14594 }
14595#ifdef IEM_WITH_SETJMP
14596 else
14597 {
14598 if (pVCpu->iem.s.cActiveMappings > 0)
14599 iemMemRollback(pVCpu);
14600 pVCpu->iem.s.cLongJumps++;
14601 }
14602 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14603#endif
14604
14605 /*
14606 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14607 */
14608 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14609 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14610 }
14611 else
14612 {
14613 if (pVCpu->iem.s.cActiveMappings > 0)
14614 iemMemRollback(pVCpu);
14615
14616#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14617 /*
14618 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14619 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14620 */
14621 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14622#endif
14623 }
14624
14625 /*
14626 * Maybe re-enter raw-mode and log.
14627 */
14628 if (rcStrict != VINF_SUCCESS)
14629 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14630 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14631 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14632 return rcStrict;
14633}
14634
14635
14636/**
14637 * Injects a trap, fault, abort, software interrupt or external interrupt.
14638 *
14639 * The parameter list matches TRPMQueryTrapAll pretty closely.
14640 *
14641 * @returns Strict VBox status code.
14642 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14643 * @param u8TrapNo The trap number.
14644 * @param enmType What type is it (trap/fault/abort), software
14645 * interrupt or hardware interrupt.
14646 * @param uErrCode The error code if applicable.
14647 * @param uCr2 The CR2 value if applicable.
14648 * @param cbInstr The instruction length (only relevant for
14649 * software interrupts).
14650 */
14651VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14652 uint8_t cbInstr)
14653{
14654 iemInitDecoder(pVCpu, false, false);
14655#ifdef DBGFTRACE_ENABLED
14656 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14657 u8TrapNo, enmType, uErrCode, uCr2);
14658#endif
14659
14660 uint32_t fFlags;
14661 switch (enmType)
14662 {
14663 case TRPM_HARDWARE_INT:
14664 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14665 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14666 uErrCode = uCr2 = 0;
14667 break;
14668
14669 case TRPM_SOFTWARE_INT:
14670 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14671 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14672 uErrCode = uCr2 = 0;
14673 break;
14674
14675 case TRPM_TRAP:
14676 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14677 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14678 if (u8TrapNo == X86_XCPT_PF)
14679 fFlags |= IEM_XCPT_FLAGS_CR2;
14680 switch (u8TrapNo)
14681 {
14682 case X86_XCPT_DF:
14683 case X86_XCPT_TS:
14684 case X86_XCPT_NP:
14685 case X86_XCPT_SS:
14686 case X86_XCPT_PF:
14687 case X86_XCPT_AC:
14688 case X86_XCPT_GP:
14689 fFlags |= IEM_XCPT_FLAGS_ERR;
14690 break;
14691 }
14692 break;
14693
14694 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14695 }
14696
14697 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14698
14699 if (pVCpu->iem.s.cActiveMappings > 0)
14700 iemMemRollback(pVCpu);
14701
14702 return rcStrict;
14703}
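
/*
 * A minimal usage sketch for IEMInjectTrap: a hypothetical caller injecting a
 * page fault (#PF with error code and CR2).  The wrapper name and the treatment
 * of VINF_IEM_RAISED_XCPT as "delivered" mirror IEMInjectTrpmEvent below and are
 * illustrative, not an existing API.
 */
#if 0 /* Illustrative sketch, not built. */
static VBOXSTRICTRC iemExampleInjectPageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrFault, uint16_t uErrCode)
{
    /* cbInstr only matters for software interrupts, so zero is fine for a #PF. */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
    if (rcStrict == VINF_IEM_RAISED_XCPT)
        rcStrict = VINF_SUCCESS;
    return rcStrict;
}
#endif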
14704
14705
14706/**
14707 * Injects the active TRPM event.
14708 *
14709 * @returns Strict VBox status code.
14710 * @param pVCpu The cross context virtual CPU structure.
14711 */
14712VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
14713{
14714#ifndef IEM_IMPLEMENTS_TASKSWITCH
14715 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14716#else
14717 uint8_t u8TrapNo;
14718 TRPMEVENT enmType;
14719 uint32_t uErrCode;
14720 RTGCUINTPTR uCr2;
14721 uint8_t cbInstr;
14722 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
14723 if (RT_FAILURE(rc))
14724 return rc;
14725
14726 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
14727 * ICEBP \#DB injection as a special case. */
14728 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14729#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14730 if (rcStrict == VINF_SVM_VMEXIT)
14731 rcStrict = VINF_SUCCESS;
14732#endif
14733#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14734 if (rcStrict == VINF_VMX_VMEXIT)
14735 rcStrict = VINF_SUCCESS;
14736#endif
14737 /** @todo Are there any other codes that imply the event was successfully
14738 * delivered to the guest? See @bugref{6607}. */
14739 if ( rcStrict == VINF_SUCCESS
14740 || rcStrict == VINF_IEM_RAISED_XCPT)
14741 TRPMResetTrap(pVCpu);
14742
14743 return rcStrict;
14744#endif
14745}
14746
14747
14748VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14749{
14750 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14751 return VERR_NOT_IMPLEMENTED;
14752}
14753
14754
14755VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14756{
14757 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14758 return VERR_NOT_IMPLEMENTED;
14759}
14760
14761
14762#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14763/**
14764 * Executes a IRET instruction with default operand size.
14765 *
14766 * This is for PATM.
14767 *
14768 * @returns VBox status code.
14769 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14770 * @param pCtxCore The register frame.
14771 */
14772VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
14773{
14774 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14775
14776 iemCtxCoreToCtx(pCtx, pCtxCore);
14777 iemInitDecoder(pVCpu);
14778 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14779 if (rcStrict == VINF_SUCCESS)
14780 iemCtxToCtxCore(pCtxCore, pCtx);
14781 else
14782 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14783	                 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14784 return rcStrict;
14785}
14786#endif
14787
14788
14789/**
14790 * Macro used by the IEMExec* method to check the given instruction length.
14791 *
14792 * Will return on failure!
14793 *
14794 * @param a_cbInstr The given instruction length.
14795 * @param a_cbMin The minimum length.
14796 */
14797#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14798 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14799 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
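/* Note: the unsigned subtraction turns the assertion into a single-compare range
   check: any a_cbInstr below a_cbMin wraps around to a huge value and fails the
   <= test, so the condition is equivalent to a_cbMin <= a_cbInstr <= 15, the
   maximum x86 instruction length. */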
14800
14801
14802/**
14803 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14804 *
14805 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14806 *
14807	 * @returns Fiddled strict VBox status code, ready to return to non-IEM caller.
14808 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14809 * @param rcStrict The status code to fiddle.
14810 */
14811DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
14812{
14813 iemUninitExec(pVCpu);
14814 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14815}
14816
14817
14818/**
14819 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14820 *
14821 * This API ASSUMES that the caller has already verified that the guest code is
14822 * allowed to access the I/O port. (The I/O port is in the DX register in the
14823 * guest state.)
14824 *
14825 * @returns Strict VBox status code.
14826 * @param pVCpu The cross context virtual CPU structure.
14827 * @param cbValue The size of the I/O port access (1, 2, or 4).
14828 * @param enmAddrMode The addressing mode.
14829 * @param fRepPrefix Indicates whether a repeat prefix is used
14830 * (doesn't matter which for this instruction).
14831 * @param cbInstr The instruction length in bytes.
14832	 * @param   iEffSeg             The effective segment register.
14833 * @param fIoChecked Whether the access to the I/O port has been
14834 * checked or not. It's typically checked in the
14835 * HM scenario.
14836 */
14837VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14838 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14839{
14840 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14841 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14842
14843 /*
14844 * State init.
14845 */
14846 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14847
14848 /*
14849 * Switch orgy for getting to the right handler.
14850 */
14851 VBOXSTRICTRC rcStrict;
14852 if (fRepPrefix)
14853 {
14854 switch (enmAddrMode)
14855 {
14856 case IEMMODE_16BIT:
14857 switch (cbValue)
14858 {
14859 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14860 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14861 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14862 default:
14863 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14864 }
14865 break;
14866
14867 case IEMMODE_32BIT:
14868 switch (cbValue)
14869 {
14870 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14871 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14872 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14873 default:
14874 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14875 }
14876 break;
14877
14878 case IEMMODE_64BIT:
14879 switch (cbValue)
14880 {
14881 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14882 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14883 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14884 default:
14885 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14886 }
14887 break;
14888
14889 default:
14890 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14891 }
14892 }
14893 else
14894 {
14895 switch (enmAddrMode)
14896 {
14897 case IEMMODE_16BIT:
14898 switch (cbValue)
14899 {
14900 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14901 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14902 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14903 default:
14904 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14905 }
14906 break;
14907
14908 case IEMMODE_32BIT:
14909 switch (cbValue)
14910 {
14911 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14912 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14913 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14914 default:
14915 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14916 }
14917 break;
14918
14919 case IEMMODE_64BIT:
14920 switch (cbValue)
14921 {
14922 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14923 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14924 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14925 default:
14926 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14927 }
14928 break;
14929
14930 default:
14931 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14932 }
14933 }
14934
14935 if (pVCpu->iem.s.cActiveMappings)
14936 iemMemRollback(pVCpu);
14937
14938 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14939}
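
/*
 * A minimal usage sketch for IEMExecStringIoWrite: how a hypothetical HM I/O exit
 * handler might hand a "rep outsb" in a 32-bit guest over to IEM.  The handler
 * name, the instruction length and the assumption that there is no segment
 * override (hence X86_SREG_DS) come from the imagined exit decoding, not from
 * this file.
 */
#if 0 /* Illustrative sketch, not built. */
static VBOXSTRICTRC hmExampleHandleRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu,
                                1 /*cbValue: byte-sized OUTSB*/,
                                IEMMODE_32BIT /*enmAddrMode*/,
                                true /*fRepPrefix*/,
                                cbInstr,
                                X86_SREG_DS /*iEffSeg: default segment, no override*/,
                                true /*fIoChecked: I/O permission checks already done by the caller*/);
}
#endif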
14940
14941
14942/**
14943 * Interface for HM and EM for executing string I/O IN (read) instructions.
14944 *
14945 * This API ASSUMES that the caller has already verified that the guest code is
14946 * allowed to access the I/O port. (The I/O port is in the DX register in the
14947 * guest state.)
14948 *
14949 * @returns Strict VBox status code.
14950 * @param pVCpu The cross context virtual CPU structure.
14951 * @param cbValue The size of the I/O port access (1, 2, or 4).
14952 * @param enmAddrMode The addressing mode.
14953 * @param fRepPrefix Indicates whether a repeat prefix is used
14954 * (doesn't matter which for this instruction).
14955 * @param cbInstr The instruction length in bytes.
14956 * @param fIoChecked Whether the access to the I/O port has been
14957 * checked or not. It's typically checked in the
14958 * HM scenario.
14959 */
14960VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14961 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14962{
14963 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14964
14965 /*
14966 * State init.
14967 */
14968 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14969
14970 /*
14971 * Switch orgy for getting to the right handler.
14972 */
14973 VBOXSTRICTRC rcStrict;
14974 if (fRepPrefix)
14975 {
14976 switch (enmAddrMode)
14977 {
14978 case IEMMODE_16BIT:
14979 switch (cbValue)
14980 {
14981 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14982 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14983 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14984 default:
14985 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14986 }
14987 break;
14988
14989 case IEMMODE_32BIT:
14990 switch (cbValue)
14991 {
14992 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14993 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14994 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14995 default:
14996 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14997 }
14998 break;
14999
15000 case IEMMODE_64BIT:
15001 switch (cbValue)
15002 {
15003 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15004 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15005 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15006 default:
15007 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15008 }
15009 break;
15010
15011 default:
15012 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15013 }
15014 }
15015 else
15016 {
15017 switch (enmAddrMode)
15018 {
15019 case IEMMODE_16BIT:
15020 switch (cbValue)
15021 {
15022 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15023 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15024 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15025 default:
15026 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15027 }
15028 break;
15029
15030 case IEMMODE_32BIT:
15031 switch (cbValue)
15032 {
15033 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15034 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15035 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15036 default:
15037 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15038 }
15039 break;
15040
15041 case IEMMODE_64BIT:
15042 switch (cbValue)
15043 {
15044 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15045 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15046 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15047 default:
15048 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15049 }
15050 break;
15051
15052 default:
15053 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15054 }
15055 }
15056
15057 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15058 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15059}
15060
15061
15062/**
15063	 * Interface for rawmode to execute an OUT (write) instruction.
15064 *
15065 * @returns Strict VBox status code.
15066 * @param pVCpu The cross context virtual CPU structure.
15067 * @param cbInstr The instruction length in bytes.
15068	 * @param   u16Port     The port to write to.
15069 * @param fImm Whether the port is specified using an immediate operand or
15070 * using the implicit DX register.
15071 * @param cbReg The register size.
15072 *
15073 * @remarks In ring-0 not all of the state needs to be synced in.
15074 */
15075VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15076{
15077 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15078 Assert(cbReg <= 4 && cbReg != 3);
15079
15080 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15081 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15082 Assert(!pVCpu->iem.s.cActiveMappings);
15083 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15084}
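
/*
 * Minimal usage sketches for IEMExecDecodedOut covering both port-addressing
 * forms.  The instruction lengths assume unprefixed encodings ("out dx, al" is
 * 1 byte, "out 80h, al" is 2 bytes); real callers take the length and the DX
 * value from their own exit decoding.  The function names are illustrative.
 */
#if 0 /* Illustrative sketch, not built. */
static VBOXSTRICTRC hmExampleOutDxAl(PVMCPUCC pVCpu, uint16_t u16PortFromDx)
{
    return IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16PortFromDx, false /*fImm*/, 1 /*cbReg*/);
}

static VBOXSTRICTRC hmExampleOutImm8Al(PVMCPUCC pVCpu)
{
    return IEMExecDecodedOut(pVCpu, 2 /*cbInstr*/, 0x80 /*u16Port*/, true /*fImm*/, 1 /*cbReg*/);
}
#endif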
15085
15086
15087/**
15088	 * Interface for rawmode to execute an IN (read) instruction.
15089 *
15090 * @returns Strict VBox status code.
15091 * @param pVCpu The cross context virtual CPU structure.
15092 * @param cbInstr The instruction length in bytes.
15093 * @param u16Port The port to read.
15094 * @param fImm Whether the port is specified using an immediate operand or
15095 * using the implicit DX.
15096 * @param cbReg The register size.
15097 */
15098VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15099{
15100 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15101 Assert(cbReg <= 4 && cbReg != 3);
15102
15103 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15104 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15105 Assert(!pVCpu->iem.s.cActiveMappings);
15106 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15107}
15108
15109
15110/**
15111 * Interface for HM and EM to write to a CRx register.
15112 *
15113 * @returns Strict VBox status code.
15114 * @param pVCpu The cross context virtual CPU structure.
15115 * @param cbInstr The instruction length in bytes.
15116 * @param iCrReg The control register number (destination).
15117 * @param iGReg The general purpose register number (source).
15118 *
15119 * @remarks In ring-0 not all of the state needs to be synced in.
15120 */
15121VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15122{
15123 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15124 Assert(iCrReg < 16);
15125 Assert(iGReg < 16);
15126
15127 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15128 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15129 Assert(!pVCpu->iem.s.cActiveMappings);
15130 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15131}
15132
15133
15134/**
15135 * Interface for HM and EM to read from a CRx register.
15136 *
15137 * @returns Strict VBox status code.
15138 * @param pVCpu The cross context virtual CPU structure.
15139 * @param cbInstr The instruction length in bytes.
15140 * @param iGReg The general purpose register number (destination).
15141 * @param iCrReg The control register number (source).
15142 *
15143 * @remarks In ring-0 not all of the state needs to be synced in.
15144 */
15145VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15146{
15147 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15148 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15149 | CPUMCTX_EXTRN_APIC_TPR);
15150 Assert(iCrReg < 16);
15151 Assert(iGReg < 16);
15152
15153 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15154 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15155 Assert(!pVCpu->iem.s.cActiveMappings);
15156 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15157}
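
/*
 * Minimal usage sketches for the two CRx accessors above, e.g. for a CR3
 * intercept.  Note the argument order: the destination comes first in both
 * cases (iCrReg for the write, iGReg for the read).  The 3 byte length matches
 * unprefixed "mov cr3, rax" / "mov rax, cr3" (0F 22 /r and 0F 20 /r); real
 * callers take the length from their exit decoding.
 */
#if 0 /* Illustrative sketch, not built. */
static VBOXSTRICTRC hmExampleMovToCr3(PVMCPUCC pVCpu)
{
    return IEMExecDecodedMovCRxWrite(pVCpu, 3 /*cbInstr*/, 3 /*iCrReg=CR3*/, X86_GREG_xAX /*iGReg*/);
}

static VBOXSTRICTRC hmExampleMovFromCr3(PVMCPUCC pVCpu)
{
    return IEMExecDecodedMovCRxRead(pVCpu, 3 /*cbInstr*/, X86_GREG_xAX /*iGReg*/, 3 /*iCrReg=CR3*/);
}
#endif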
15158
15159
15160/**
15161 * Interface for HM and EM to clear the CR0[TS] bit.
15162 *
15163 * @returns Strict VBox status code.
15164 * @param pVCpu The cross context virtual CPU structure.
15165 * @param cbInstr The instruction length in bytes.
15166 *
15167 * @remarks In ring-0 not all of the state needs to be synced in.
15168 */
15169VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
15170{
15171 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15172
15173 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15174 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15175 Assert(!pVCpu->iem.s.cActiveMappings);
15176 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15177}
15178
15179
15180/**
15181 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15182 *
15183 * @returns Strict VBox status code.
15184 * @param pVCpu The cross context virtual CPU structure.
15185 * @param cbInstr The instruction length in bytes.
15186 * @param uValue The value to load into CR0.
15187 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15188 * memory operand. Otherwise pass NIL_RTGCPTR.
15189 *
15190 * @remarks In ring-0 not all of the state needs to be synced in.
15191 */
15192VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15193{
15194 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15195
15196 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15197 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15198 Assert(!pVCpu->iem.s.cActiveMappings);
15199 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15200}
15201
15202
15203/**
15204 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15205 *
15206 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15207 *
15208 * @returns Strict VBox status code.
15209 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15210 * @param cbInstr The instruction length in bytes.
15211 * @remarks In ring-0 not all of the state needs to be synced in.
15212 * @thread EMT(pVCpu)
15213 */
15214VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
15215{
15216 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15217
15218 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15219 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15220 Assert(!pVCpu->iem.s.cActiveMappings);
15221 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15222}
15223
15224
15225/**
15226 * Interface for HM and EM to emulate the WBINVD instruction.
15227 *
15228 * @returns Strict VBox status code.
15229 * @param pVCpu The cross context virtual CPU structure.
15230 * @param cbInstr The instruction length in bytes.
15231 *
15232 * @remarks In ring-0 not all of the state needs to be synced in.
15233 */
15234VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15235{
15236 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15237
15238 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15239 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15240 Assert(!pVCpu->iem.s.cActiveMappings);
15241 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15242}
15243
15244
15245/**
15246 * Interface for HM and EM to emulate the INVD instruction.
15247 *
15248 * @returns Strict VBox status code.
15249 * @param pVCpu The cross context virtual CPU structure.
15250 * @param cbInstr The instruction length in bytes.
15251 *
15252 * @remarks In ring-0 not all of the state needs to be synced in.
15253 */
15254VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15255{
15256 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15257
15258 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15259 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15260 Assert(!pVCpu->iem.s.cActiveMappings);
15261 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15262}
15263
15264
15265/**
15266 * Interface for HM and EM to emulate the INVLPG instruction.
15267 *
15268 * @returns Strict VBox status code.
15269 * @retval VINF_PGM_SYNC_CR3
15270 *
15271 * @param pVCpu The cross context virtual CPU structure.
15272 * @param cbInstr The instruction length in bytes.
15273 * @param GCPtrPage The effective address of the page to invalidate.
15274 *
15275 * @remarks In ring-0 not all of the state needs to be synced in.
15276 */
15277VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15278{
15279 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15280
15281 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15282 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15283 Assert(!pVCpu->iem.s.cActiveMappings);
15284 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15285}
15286
15287
15288/**
15289 * Interface for HM and EM to emulate the INVPCID instruction.
15290 *
15291 * @returns Strict VBox status code.
15292 * @retval VINF_PGM_SYNC_CR3
15293 *
15294 * @param pVCpu The cross context virtual CPU structure.
15295 * @param cbInstr The instruction length in bytes.
15296 * @param iEffSeg The effective segment register.
15297 * @param GCPtrDesc The effective address of the INVPCID descriptor.
15298 * @param uType The invalidation type.
15299 *
15300 * @remarks In ring-0 not all of the state needs to be synced in.
15301 */
15302VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
15303 uint64_t uType)
15304{
15305 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
15306
15307 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15308 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
15309 Assert(!pVCpu->iem.s.cActiveMappings);
15310 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15311}
15312
15313
15314/**
15315 * Interface for HM and EM to emulate the CPUID instruction.
15316 *
15317 * @returns Strict VBox status code.
15318 *
15319 * @param pVCpu The cross context virtual CPU structure.
15320 * @param cbInstr The instruction length in bytes.
15321 *
15322	 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
15323 */
15324VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
15325{
15326 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15327 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15328
15329 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15330 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15331 Assert(!pVCpu->iem.s.cActiveMappings);
15332 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15333}
15334
15335
15336/**
15337 * Interface for HM and EM to emulate the RDPMC instruction.
15338 *
15339 * @returns Strict VBox status code.
15340 *
15341 * @param pVCpu The cross context virtual CPU structure.
15342 * @param cbInstr The instruction length in bytes.
15343 *
15344 * @remarks Not all of the state needs to be synced in.
15345 */
15346VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
15347{
15348 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15349 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15350
15351 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15352 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15353 Assert(!pVCpu->iem.s.cActiveMappings);
15354 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15355}
15356
15357
15358/**
15359 * Interface for HM and EM to emulate the RDTSC instruction.
15360 *
15361 * @returns Strict VBox status code.
15362 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15363 *
15364 * @param pVCpu The cross context virtual CPU structure.
15365 * @param cbInstr The instruction length in bytes.
15366 *
15367 * @remarks Not all of the state needs to be synced in.
15368 */
15369VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
15370{
15371 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15372 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15373
15374 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15375 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15376 Assert(!pVCpu->iem.s.cActiveMappings);
15377 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15378}
15379
15380
15381/**
15382 * Interface for HM and EM to emulate the RDTSCP instruction.
15383 *
15384 * @returns Strict VBox status code.
15385 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15386 *
15387 * @param pVCpu The cross context virtual CPU structure.
15388 * @param cbInstr The instruction length in bytes.
15389 *
15390 * @remarks Not all of the state needs to be synced in. Recommended
15391	 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15392 */
15393VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
15394{
15395 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15396 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15397
15398 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15399 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15400 Assert(!pVCpu->iem.s.cActiveMappings);
15401 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15402}
15403
15404
15405/**
15406 * Interface for HM and EM to emulate the RDMSR instruction.
15407 *
15408 * @returns Strict VBox status code.
15409 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15410 *
15411 * @param pVCpu The cross context virtual CPU structure.
15412 * @param cbInstr The instruction length in bytes.
15413 *
15414 * @remarks Not all of the state needs to be synced in. Requires RCX and
15415 * (currently) all MSRs.
15416 */
15417VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15418{
15419 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15420 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15421
15422 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15423 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15424 Assert(!pVCpu->iem.s.cActiveMappings);
15425 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15426}
15427
15428
15429/**
15430 * Interface for HM and EM to emulate the WRMSR instruction.
15431 *
15432 * @returns Strict VBox status code.
15433 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15434 *
15435 * @param pVCpu The cross context virtual CPU structure.
15436 * @param cbInstr The instruction length in bytes.
15437 *
15438 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15439 * and (currently) all MSRs.
15440 */
15441VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15442{
15443 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15444 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15445 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15446
15447 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15448 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15449 Assert(!pVCpu->iem.s.cActiveMappings);
15450 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15451}
15452
15453
15454/**
15455 * Interface for HM and EM to emulate the MONITOR instruction.
15456 *
15457 * @returns Strict VBox status code.
15458 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15459 *
15460 * @param pVCpu The cross context virtual CPU structure.
15461 * @param cbInstr The instruction length in bytes.
15462 *
15463 * @remarks Not all of the state needs to be synced in.
15464 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15465 * are used.
15466 */
15467VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
15468{
15469 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15470 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15471
15472 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15473 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15474 Assert(!pVCpu->iem.s.cActiveMappings);
15475 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15476}
15477
15478
15479/**
15480 * Interface for HM and EM to emulate the MWAIT instruction.
15481 *
15482 * @returns Strict VBox status code.
15483 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15484 *
15485 * @param pVCpu The cross context virtual CPU structure.
15486 * @param cbInstr The instruction length in bytes.
15487 *
15488 * @remarks Not all of the state needs to be synced in.
15489 */
15490VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
15491{
15492 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15493 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
15494
15495 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15496 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15497 Assert(!pVCpu->iem.s.cActiveMappings);
15498 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15499}
15500
15501
15502/**
15503 * Interface for HM and EM to emulate the HLT instruction.
15504 *
15505 * @returns Strict VBox status code.
15506 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15507 *
15508 * @param pVCpu The cross context virtual CPU structure.
15509 * @param cbInstr The instruction length in bytes.
15510 *
15511 * @remarks Not all of the state needs to be synced in.
15512 */
15513VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
15514{
15515 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15516
15517 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15518 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15519 Assert(!pVCpu->iem.s.cActiveMappings);
15520 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15521}
15522
15523
15524/**
15525 * Checks if IEM is in the process of delivering an event (interrupt or
15526 * exception).
15527 *
15528 * @returns true if we're in the process of raising an interrupt or exception,
15529 * false otherwise.
15530 * @param pVCpu The cross context virtual CPU structure.
15531 * @param puVector Where to store the vector associated with the
15532 * currently delivered event, optional.
15533	 * @param   pfFlags     Where to store the event delivery flags (see
15534 * IEM_XCPT_FLAGS_XXX), optional.
15535 * @param puErr Where to store the error code associated with the
15536 * event, optional.
15537 * @param puCr2 Where to store the CR2 associated with the event,
15538 * optional.
15539 * @remarks The caller should check the flags to determine if the error code and
15540 * CR2 are valid for the event.
15541 */
15542VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15543{
15544 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15545 if (fRaisingXcpt)
15546 {
15547 if (puVector)
15548 *puVector = pVCpu->iem.s.uCurXcpt;
15549 if (pfFlags)
15550 *pfFlags = pVCpu->iem.s.fCurXcpt;
15551 if (puErr)
15552 *puErr = pVCpu->iem.s.uCurXcptErr;
15553 if (puCr2)
15554 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15555 }
15556 return fRaisingXcpt;
15557}
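
/*
 * A minimal usage sketch for IEMGetCurrentXcpt: query the event currently being
 * delivered and only trust the error code / CR2 when the corresponding
 * IEM_XCPT_FLAGS_ERR / IEM_XCPT_FLAGS_CR2 flags are set.  The function name is
 * illustrative.
 */
#if 0 /* Illustrative sketch, not built. */
static void hmExampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        Log(("IEM is delivering vector %#x fFlags=%#x uErr=%#x (valid=%RTbool) uCr2=%#RX64 (valid=%RTbool)\n",
             uVector, fFlags, uErr, RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR),
             uCr2, RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2)));
}
#endif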
15558
15559#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15560
15561/**
15562 * Interface for HM and EM to emulate the CLGI instruction.
15563 *
15564 * @returns Strict VBox status code.
15565 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15566 * @param cbInstr The instruction length in bytes.
15567 * @thread EMT(pVCpu)
15568 */
15569VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15570{
15571 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15572
15573 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15574 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15575 Assert(!pVCpu->iem.s.cActiveMappings);
15576 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15577}
15578
15579
15580/**
15581 * Interface for HM and EM to emulate the STGI instruction.
15582 *
15583 * @returns Strict VBox status code.
15584 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15585 * @param cbInstr The instruction length in bytes.
15586 * @thread EMT(pVCpu)
15587 */
15588VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15589{
15590 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15591
15592 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15593 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15594 Assert(!pVCpu->iem.s.cActiveMappings);
15595 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15596}
15597
15598
15599/**
15600 * Interface for HM and EM to emulate the VMLOAD instruction.
15601 *
15602 * @returns Strict VBox status code.
15603 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15604 * @param cbInstr The instruction length in bytes.
15605 * @thread EMT(pVCpu)
15606 */
15607VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
15608{
15609 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15610
15611 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15612 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15613 Assert(!pVCpu->iem.s.cActiveMappings);
15614 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15615}
15616
15617
15618/**
15619 * Interface for HM and EM to emulate the VMSAVE instruction.
15620 *
15621 * @returns Strict VBox status code.
15622 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15623 * @param cbInstr The instruction length in bytes.
15624 * @thread EMT(pVCpu)
15625 */
15626VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
15627{
15628 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15629
15630 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15631 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15632 Assert(!pVCpu->iem.s.cActiveMappings);
15633 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15634}
15635
15636
15637/**
15638 * Interface for HM and EM to emulate the INVLPGA instruction.
15639 *
15640 * @returns Strict VBox status code.
15641 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15642 * @param cbInstr The instruction length in bytes.
15643 * @thread EMT(pVCpu)
15644 */
15645VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
15646{
15647 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15648
15649 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15650 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15651 Assert(!pVCpu->iem.s.cActiveMappings);
15652 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15653}
15654
15655
15656/**
15657 * Interface for HM and EM to emulate the VMRUN instruction.
15658 *
15659 * @returns Strict VBox status code.
15660 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15661 * @param cbInstr The instruction length in bytes.
15662 * @thread EMT(pVCpu)
15663 */
15664VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
15665{
15666 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15667 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15668
15669 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15670 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15671 Assert(!pVCpu->iem.s.cActiveMappings);
15672 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15673}
15674
15675
15676/**
15677 * Interface for HM and EM to emulate \#VMEXIT.
15678 *
15679 * @returns Strict VBox status code.
15680 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15681 * @param uExitCode The exit code.
15682 * @param uExitInfo1 The exit info. 1 field.
15683 * @param uExitInfo2 The exit info. 2 field.
15684 * @thread EMT(pVCpu)
15685 */
15686VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15687{
15688 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15689 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15690 if (pVCpu->iem.s.cActiveMappings)
15691 iemMemRollback(pVCpu);
15692 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15693}
15694
15695#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15696
15697#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15698
15699/**
15700 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
15701 *
15702 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
15703	 * are performed. Bounds checks are done in strict builds only.
15704 *
15705 * @param pVmcs Pointer to the virtual VMCS.
15706 * @param u64VmcsField The VMCS field.
15707 * @param pu64Dst Where to store the VMCS value.
15708 *
15709 * @remarks May be called with interrupts disabled.
15710 * @todo This should probably be moved to CPUM someday.
15711 */
15712VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
15713{
15714 AssertPtr(pVmcs);
15715 AssertPtr(pu64Dst);
15716 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
15717}
15718
15719
15720/**
15721 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
15722 *
15723 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
15724	 * are performed. Bounds checks are done in strict builds only.
15725 *
15726 * @param pVmcs Pointer to the virtual VMCS.
15727 * @param u64VmcsField The VMCS field.
15728 * @param u64Val The value to write.
15729 *
15730 * @remarks May be called with interrupts disabled.
15731 * @todo This should probably be moved to CPUM someday.
15732 */
15733VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
15734{
15735 AssertPtr(pVmcs);
15736 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
15737}
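
/*
 * A minimal usage sketch for the raw VMCS accessors above: read-modify-write of
 * the guest RIP field in the virtual VMCS.  The VMX_VMCS_GUEST_RIP constant is
 * assumed to be the field encoding from the VMX headers; no VMREAD/VMWRITE
 * checks are done here, so the caller must know the field and the VMCS are valid.
 */
#if 0 /* Illustrative sketch, not built. */
static void hmExampleAdvanceVirtVmcsRip(PVMXVVMCS pVmcs, uint8_t cbInstr)
{
    uint64_t u64GuestRip;
    IEMReadVmxVmcsField(pVmcs, VMX_VMCS_GUEST_RIP, &u64GuestRip);
    IEMWriteVmxVmcsField(pVmcs, VMX_VMCS_GUEST_RIP, u64GuestRip + cbInstr);
}
#endif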
15738
15739
15740/**
15741 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15742 *
15743 * @returns Strict VBox status code.
15744 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15745 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15746 * the x2APIC device.
15747	 * @retval  VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15748 *
15749 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15750	 * @param   idMsr       The MSR being read or written.
15751 * @param pu64Value Pointer to the value being written or where to store the
15752 * value being read.
15753 * @param fWrite Whether this is an MSR write or read access.
15754 * @thread EMT(pVCpu)
15755 */
15756VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15757{
15758 Assert(pu64Value);
15759
15760 VBOXSTRICTRC rcStrict;
15761 if (fWrite)
15762 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15763 else
15764 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15765 Assert(!pVCpu->iem.s.cActiveMappings);
15766 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15767
15768}
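
/*
 * A minimal usage sketch for IEMExecVmxVirtApicAccessMsr on the RDMSR path,
 * spelling out what each status code means to the caller.  The wrapper name is
 * illustrative; what a real exit handler does with each code depends on its own
 * structure.
 */
#if 0 /* Illustrative sketch, not built. */
static VBOXSTRICTRC hmExampleVirtX2ApicMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value)
{
    VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, pu64Value, false /*fWrite*/);
    /* VINF_VMX_MODIFIES_BEHAVIOR:    *pu64Value was satisfied from the virtual-APIC page, nothing more to do.
       VINF_VMX_INTERCEPT_NOT_ACTIVE: the caller must let the x2APIC device service the read.
       VERR_OUT_OF_RANGE:             the caller must raise #GP(0) in the guest. */
    Log(("x2APIC MSR %#x read virtualization -> %Rrc\n", idMsr, VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif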
15769
15770
15771/**
15772 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15773 *
15774 * @returns Strict VBox status code.
15775 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15776 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15777 *
15778 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15779 * @param pExitInfo Pointer to the VM-exit information.
15780 * @param pExitEventInfo Pointer to the VM-exit event information.
15781 * @thread EMT(pVCpu)
15782 */
15783VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15784{
15785 Assert(pExitInfo);
15786 Assert(pExitEventInfo);
15787 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15788 Assert(!pVCpu->iem.s.cActiveMappings);
15789 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15790
15791}
15792
15793
15794/**
15795 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15796 * VM-exit.
15797 *
15798 * @returns Strict VBox status code.
15799 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15800 * @thread EMT(pVCpu)
15801 */
15802VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu)
15803{
15804 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15805 Assert(!pVCpu->iem.s.cActiveMappings);
15806 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15807}
15808
15809
15810/**
15811 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15812 *
15813 * @returns Strict VBox status code.
15814 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15815 * @thread EMT(pVCpu)
15816 */
15817VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
15818{
15819 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15820 Assert(!pVCpu->iem.s.cActiveMappings);
15821 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15822}
15823
15824
15825/**
15826 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15827 *
15828 * @returns Strict VBox status code.
15829 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15830 * @param uVector The external interrupt vector (pass 0 if the external
15831 * interrupt is still pending).
15832 * @param fIntPending Whether the external interrupt is pending or
15833	 *                      acknowledged in the interrupt controller.
15834 * @thread EMT(pVCpu)
15835 */
15836VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
15837{
15838 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15839 Assert(!pVCpu->iem.s.cActiveMappings);
15840 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15841}
15842
15843
15844/**
15845 * Interface for HM and EM to emulate VM-exit due to exceptions.
15846 *
15847	 * Exceptions include NMIs, software exceptions (those generated by INT3 or
15848 * INTO) and privileged software exceptions (those generated by INT1/ICEBP).
15849 *
15850 * @returns Strict VBox status code.
15851 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15852 * @param pExitInfo Pointer to the VM-exit information.
15853 * @param pExitEventInfo Pointer to the VM-exit event information.
15854 * @thread EMT(pVCpu)
15855 */
15856VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15857{
15858 Assert(pExitInfo);
15859 Assert(pExitEventInfo);
15860 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15861 Assert(!pVCpu->iem.s.cActiveMappings);
15862 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15863}
15864
15865
15866/**
15867 * Interface for HM and EM to emulate VM-exit due to NMIs.
15868 *
15869 * @returns Strict VBox status code.
15870 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15871 * @thread EMT(pVCpu)
15872 */
15873VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu)
15874{
15875 VMXVEXITINFO ExitInfo;
15876 RT_ZERO(ExitInfo);
15877 ExitInfo.uReason = VMX_EXIT_XCPT_OR_NMI;
15878
15879 VMXVEXITEVENTINFO ExitEventInfo;
15880 RT_ZERO(ExitEventInfo);
15881 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
15882 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
15883 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
15884
15885 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
15886 Assert(!pVCpu->iem.s.cActiveMappings);
15887 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15888}
15889
15890
15891/**
15892 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
15893 *
15894 * @returns Strict VBox status code.
15895 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15896 * @thread EMT(pVCpu)
15897 */
15898VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu)
15899{
15900 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
15901 Assert(!pVCpu->iem.s.cActiveMappings);
15902 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15903}
15904
15905
15906/**
15907 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15908 *
15909 * @returns Strict VBox status code.
15910 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15911 * @param uVector The SIPI vector.
15912 * @thread EMT(pVCpu)
15913 */
15914VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector)
15915{
15916 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
15917 Assert(!pVCpu->iem.s.cActiveMappings);
15918 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15919}
15920
15921
15922/**
15923 * Interface for HM and EM to emulate a VM-exit.
15924 *
15925 * If a specialized version of a VM-exit handler exists, that must be used instead.
15926 *
15927 * @returns Strict VBox status code.
15928 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15929 * @param uExitReason The VM-exit reason.
15930 * @param u64ExitQual The Exit qualification.
15931 * @thread EMT(pVCpu)
15932 */
15933VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
15934{
15935 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
15936 Assert(!pVCpu->iem.s.cActiveMappings);
15937 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15938}
15939
15940
15941/**
15942 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15943 *
15944	 * This is meant to be used for those instructions for which VMX provides additional
15945 * decoding information beyond just the instruction length!
15946 *
15947 * @returns Strict VBox status code.
15948 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15949 * @param pExitInfo Pointer to the VM-exit information.
15950 * @thread EMT(pVCpu)
15951 */
15952VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15953{
15954 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
15955 Assert(!pVCpu->iem.s.cActiveMappings);
15956 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15957}
15958
15959
15960/**
15961 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15962 *
15963	 * This is meant to be used for those instructions for which VMX provides only the
15964 * instruction length.
15965 *
15966 * @returns Strict VBox status code.
15967 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15968	 * @param   uExitReason The VM-exit reason.
15969 * @param cbInstr The instruction length in bytes.
15970 * @thread EMT(pVCpu)
15971 */
15972VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
15973{
15974 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
15975 Assert(!pVCpu->iem.s.cActiveMappings);
15976 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15977}
15978
15979
15980/**
15981 * Interface for HM and EM to emulate a trap-like VM-exit (MTF, APIC-write,
15982 * Virtualized-EOI, TPR-below threshold).
15983 *
15984 * @returns Strict VBox status code.
15985 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15986 * @param pExitInfo Pointer to the VM-exit information.
15987 * @thread EMT(pVCpu)
15988 */
15989VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15990{
15991 Assert(pExitInfo);
15992 VBOXSTRICTRC rcStrict = iemVmxVmexitTrapLikeWithInfo(pVCpu, pExitInfo);
15993 Assert(!pVCpu->iem.s.cActiveMappings);
15994 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15995}
15996
15997
15998/**
15999 * Interface for HM and EM to emulate a VM-exit due to a task switch.
16000 *
16001 * @returns Strict VBox status code.
16002 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16003 * @param pExitInfo Pointer to the VM-exit information.
16004 * @param pExitEventInfo Pointer to the VM-exit event information.
16005 * @thread EMT(pVCpu)
16006 */
16007VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16008{
16009 Assert(pExitInfo);
16010 Assert(pExitEventInfo);
16011 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
16012 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16013 Assert(!pVCpu->iem.s.cActiveMappings);
16014 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16015}
16016
16017
16018/**
16019 * Interface for HM and EM to emulate the VMREAD instruction.
16020 *
16021 * @returns Strict VBox status code.
16022 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16023 * @param pExitInfo Pointer to the VM-exit information.
16024 * @thread EMT(pVCpu)
16025 */
16026VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16027{
16028 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16029 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16030 Assert(pExitInfo);
16031
16032 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16033
16034 VBOXSTRICTRC rcStrict;
16035 uint8_t const cbInstr = pExitInfo->cbInstr;
16036 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
16037 uint64_t const u64FieldEnc = fIs64BitMode
16038 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16039 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16040 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16041 {
16042 if (fIs64BitMode)
16043 {
16044 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16045 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
16046 }
16047 else
16048 {
16049 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16050 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
16051 }
16052 }
16053 else
16054 {
16055 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
16056 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16057 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
16058 }
16059 Assert(!pVCpu->iem.s.cActiveMappings);
16060 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16061}
16062
16063
16064/**
16065 * Interface for HM and EM to emulate the VMWRITE instruction.
16066 *
16067 * @returns Strict VBox status code.
16068 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16069 * @param pExitInfo Pointer to the VM-exit information.
16070 * @thread EMT(pVCpu)
16071 */
16072VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16073{
16074 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16075 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16076 Assert(pExitInfo);
16077
16078 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16079
16080 uint64_t u64Val;
16081 uint8_t iEffSeg;
16082 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16083 {
16084 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16085 iEffSeg = UINT8_MAX;
16086 }
16087 else
16088 {
16089 u64Val = pExitInfo->GCPtrEffAddr;
16090 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16091 }
16092 uint8_t const cbInstr = pExitInfo->cbInstr;
16093 uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16094 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16095 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16096 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
16097 Assert(!pVCpu->iem.s.cActiveMappings);
16098 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16099}
16100
16101
16102/**
16103 * Interface for HM and EM to emulate the VMPTRLD instruction.
16104 *
16105 * @returns Strict VBox status code.
16106 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16107 * @param pExitInfo Pointer to the VM-exit information.
16108 * @thread EMT(pVCpu)
16109 */
16110VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16111{
16112 Assert(pExitInfo);
16113 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16114 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16115
16116 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16117
16118 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16119 uint8_t const cbInstr = pExitInfo->cbInstr;
16120 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16121 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16122 Assert(!pVCpu->iem.s.cActiveMappings);
16123 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16124}
16125
16126
16127/**
16128 * Interface for HM and EM to emulate the VMPTRST instruction.
16129 *
16130 * @returns Strict VBox status code.
16131 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16132 * @param pExitInfo Pointer to the VM-exit information.
16133 * @thread EMT(pVCpu)
16134 */
16135VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16136{
16137 Assert(pExitInfo);
16138 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16139 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16140
16141 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16142
16143 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16144 uint8_t const cbInstr = pExitInfo->cbInstr;
16145 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16146 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16147 Assert(!pVCpu->iem.s.cActiveMappings);
16148 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16149}
16150
16151
16152/**
16153 * Interface for HM and EM to emulate the VMCLEAR instruction.
16154 *
16155 * @returns Strict VBox status code.
16156 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16157 * @param pExitInfo Pointer to the VM-exit information.
16158 * @thread EMT(pVCpu)
16159 */
16160VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16161{
16162 Assert(pExitInfo);
16163 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16164 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16165
16166 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16167
16168 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16169 uint8_t const cbInstr = pExitInfo->cbInstr;
16170 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16171 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16172 Assert(!pVCpu->iem.s.cActiveMappings);
16173 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16174}
16175
16176
16177/**
16178 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16179 *
16180 * @returns Strict VBox status code.
16181 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16182 * @param cbInstr The instruction length in bytes.
16183 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16184 * VMXINSTRID_VMRESUME).
16185 * @thread EMT(pVCpu)
16186 */
16187VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16188{
16189 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16190 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16191
16192 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16193 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16194 Assert(!pVCpu->iem.s.cActiveMappings);
16195 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16196}
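/*
 * Usage sketch (illustrative, not from the original source): unlike the
 * pExitInfo-based interfaces, this one only needs the instruction length and
 * the instruction identity, e.g.:
 *
 * @code
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, cbExitInstr, VMXINSTRID_VMLAUNCH);
 * @endcode
 *
 * where cbExitInstr is assumed to come from the VM-exit instruction length field.
 */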
16197
16198
16199/**
16200 * Interface for HM and EM to emulate the VMXON instruction.
16201 *
16202 * @returns Strict VBox status code.
16203 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16204 * @param pExitInfo Pointer to the VM-exit information.
16205 * @thread EMT(pVCpu)
16206 */
16207VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16208{
16209 Assert(pExitInfo);
16210 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16211 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16212
16213 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16214
16215 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16216 uint8_t const cbInstr = pExitInfo->cbInstr;
16217 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16218 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16219 Assert(!pVCpu->iem.s.cActiveMappings);
16220 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16221}
16222
16223
16224/**
16225 * Interface for HM and EM to emulate the VMXOFF instruction.
16226 *
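 * VMXOFF takes no memory operand, so unlike the interfaces above there is no
 * VM-exit information structure to pass; only the instruction length is needed
 * and the emulation is performed via the regular C-impl helper.
 *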
16227 * @returns Strict VBox status code.
16228 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16229 * @param cbInstr The instruction length in bytes.
16230 * @thread EMT(pVCpu)
16231 */
16232VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr)
16233{
16234 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16235 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16236
16237 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16238 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16239 Assert(!pVCpu->iem.s.cActiveMappings);
16240 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16241}
16242
16243
16244/**
16245 * Interface for HM and EM to emulate the INVVPID instruction.
16246 *
16247 * @returns Strict VBox status code.
16248 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16249 * @param pExitInfo Pointer to the VM-exit information.
16250 * @thread EMT(pVCpu)
16251 */
16252VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16253{
16254 Assert(pExitInfo);
16255 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
16256 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16257
16258 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16259
16260 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
16261 uint8_t const cbInstr = pExitInfo->cbInstr;
16262 RTGCPTR const GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
16263 uint64_t const u64InvvpidType = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16264 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
16265 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
16266 VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
16267 Assert(!pVCpu->iem.s.cActiveMappings);
16268 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16269}
16270
16271
16272/**
16273 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
16274 *
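 * While the guest is in VMX non-root mode the access is handed to the
 * virtual-APIC access emulation and VINF_SUCCESS is returned, telling PGM not
 * to perform the access itself.  Otherwise the handler deregisters itself from
 * the page and returns VINF_PGM_HANDLER_DO_DEFAULT so the access is carried
 * out as ordinary memory.
 *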
16275 * @remarks The @a pvUser argument is currently unused.
16276 */
16277PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
16278 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
16279 PGMACCESSORIGIN enmOrigin, void *pvUser)
16280{
16281 RT_NOREF3(pvPhys, enmOrigin, pvUser);
16282
16283 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
16284 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16285 {
16286 Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16287 Assert(CPUMGetGuestVmxApicAccessPageAddr(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16288
16289 /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
16290 * Currently they will go through as read accesses. */
16291 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
16292 uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
16293 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
16294 if (RT_FAILURE(rcStrict))
16295 return rcStrict;
16296
16297 /* Any access to this APIC-access page has been handled; the caller should not carry out the access. */
16298 return VINF_SUCCESS;
16299 }
16300
16301 Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
16302 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16303 if (RT_FAILURE(rc))
16304 return rc;
16305
16306 /* Instruct the caller of this handler to carry out the read/write as a normal memory access. */
16307 return VINF_PGM_HANDLER_DO_DEFAULT;
16308}
16309
16310#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16311
16312#ifdef IN_RING3
16313
16314/**
16315 * Handles the unlikely and probably fatal merge cases.
16316 *
16317 * @returns Merged status code.
16318 * @param rcStrict Current EM status code.
16319 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16320 * with @a rcStrict.
16321 * @param iMemMap The memory mapping index. For error reporting only.
16322 * @param pVCpu The cross context virtual CPU structure of the calling
16323 * thread, for error reporting only.
16324 */
16325DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16326 unsigned iMemMap, PVMCPUCC pVCpu)
16327{
16328 if (RT_FAILURE_NP(rcStrict))
16329 return rcStrict;
16330
16331 if (RT_FAILURE_NP(rcStrictCommit))
16332 return rcStrictCommit;
16333
16334 if (rcStrict == rcStrictCommit)
16335 return rcStrictCommit;
16336
16337 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16338 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16339 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16340 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16341 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16342 return VERR_IOM_FF_STATUS_IPE;
16343}
16344
16345
16346/**
16347 * Helper for IOMR3ProcessForceFlag.
16348 *
16349 * @returns Merged status code.
16350 * @param rcStrict Current EM status code.
16351 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16352 * with @a rcStrict.
16353 * @param iMemMap The memory mapping index. For error reporting only.
16354 * @param pVCpu The cross context virtual CPU structure of the calling
16355 * thread, for error reporting only.
16356 */
16357DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
16358{
16359 /* Simple. */
16360 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16361 return rcStrictCommit;
16362
16363 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16364 return rcStrict;
16365
16366 /* EM scheduling status codes. */
16367 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16368 && rcStrict <= VINF_EM_LAST))
16369 {
16370 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16371 && rcStrictCommit <= VINF_EM_LAST))
16372 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16373 }
16374
16375 /* Unlikely. */
16376 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16377}
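/*
 * Worked examples of the merge rules above (illustration only, derived
 * directly from the code):
 *     rcStrict = VINF_SUCCESS,      rcStrictCommit = VINF_EM_RAW_TO_R3 -> VINF_EM_RAW_TO_R3
 *     rcStrict = VINF_EM_RAW_TO_R3, rcStrictCommit = VINF_SUCCESS      -> VINF_SUCCESS
 *     both in the VINF_EM_FIRST..VINF_EM_LAST scheduling range         -> the numerically lower of the two
 *     anything else (failures, odd mixes)                              -> deferred to iemR3MergeStatusSlow
 */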
16378
16379
16380/**
16381 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16382 *
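 * Commits bounce-buffered memory writes that were left pending for ring-3
 * (IEM_ACCESS_PENDING_R3_WRITE_1ST/2ND) by replaying them through PGMPhysWrite
 * and merging the commit status with @a rcStrict.
 *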
16383 * @returns Merge between @a rcStrict and what the commit operation returned.
16384 * @param pVM The cross context VM structure.
16385 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16386 * @param rcStrict The status code returned by ring-0 or raw-mode.
16387 */
16388VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
16389{
16390 /*
16391 * Reset the pending commit.
16392 */
16393 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16394 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16395 ("%#x %#x %#x\n",
16396 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16397 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16398
16399 /*
16400 * Commit the pending bounce buffers (usually just one).
16401 */
16402 unsigned cBufs = 0;
16403 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16404 while (iMemMap-- > 0)
16405 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16406 {
16407 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16408 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16409 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16410
16411 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16412 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16413 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16414
16415 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16416 {
16417 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16418 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16419 pbBuf,
16420 cbFirst,
16421 PGMACCESSORIGIN_IEM);
16422 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16423 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16424 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16425 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16426 }
16427
16428 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16429 {
16430 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16431 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16432 pbBuf + cbFirst,
16433 cbSecond,
16434 PGMACCESSORIGIN_IEM);
16435 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16436 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16437 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16438 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16439 }
16440 cBufs++;
16441 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16442 }
16443
16444 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16445 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16446 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16447 pVCpu->iem.s.cActiveMappings = 0;
16448 return rcStrict;
16449}
16450
16451#endif /* IN_RING3 */
16452