VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 91247

Last change on this file since 91247 was 91016, checked in by vboxsync, 3 years ago

VMM/PGM,++: Kicked out VBOX_WITH_2X_4GB_ADDR_SPACE and the DynMap code used by it and raw-mode. Kept this around in case we wanted to reuse it for SMAP workarounds, but that's no longer needed. bugref:9517 bugref:9627

1/* $Id: IEMAll.cpp 91016 2021-08-31 01:23:53Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed, as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance, an innocent-looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
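
/*
 * Illustrative sketch, not part of the original sources: the levels listed
 * above map onto the usual VBox logging macros from VBox/log.h, e.g.
 *
 *      LogFlow(("iemHypotheticalHelper: enter rip=%RX64\n", pVCpu->cpum.GstCtx.rip));
 *      Log4(("decode: %04x:%RX64 xor eax, eax\n", uHypotheticalCs, uHypotheticalRip));
 *      Log8(("write: %RGv LB %#x\n", GCPtrHypothetical, cbHypothetical));
 *      Log(("iemHypotheticalHelper: raising #GP(0)\n"));
 *
 * The helper and variable names above are made up for illustration only.
 */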
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#include "IEMInternal.h"
111#include <VBox/vmm/vmcc.h>
112#include <VBox/log.h>
113#include <VBox/err.h>
114#include <VBox/param.h>
115#include <VBox/dis.h>
116#include <VBox/disopcode.h>
117#include <iprt/asm-math.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123/*********************************************************************************************************************************
124* Structures and Typedefs *
125*********************************************************************************************************************************/
126/** @typedef PFNIEMOP
127 * Pointer to an opcode decoder function.
128 */
129
130/** @def FNIEMOP_DEF
131 * Define an opcode decoder function.
132 *
133 * We're using macros for this so that adding and removing parameters as well as
134 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
135 *
136 * @param a_Name The function name.
137 */
138
139/** @typedef PFNIEMOPRM
140 * Pointer to an opcode decoder function with RM byte.
141 */
142
143/** @def FNIEMOPRM_DEF
144 * Define an opcode decoder function with RM byte.
145 *
146 * We're using macros for this so that adding and removing parameters as well as
147 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
148 *
149 * @param a_Name The function name.
150 */
151
152#if defined(__GNUC__) && defined(RT_ARCH_X86)
153typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
154typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
164typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
171
172#elif defined(__GNUC__)
173typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
174typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
181
182#else
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
191
192#endif
193#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
194
195
196/**
197 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
198 */
199typedef union IEMSELDESC
200{
201 /** The legacy view. */
202 X86DESC Legacy;
203 /** The long mode view. */
204 X86DESC64 Long;
205} IEMSELDESC;
206/** Pointer to a selector descriptor table entry. */
207typedef IEMSELDESC *PIEMSELDESC;
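
/*
 * Usage note, illustrative fragment only: most code works on the 8-byte
 * 'Legacy' view, while the 'Long' view is used for the 16-byte system
 * descriptors of long mode:
 *
 *      IEMSELDESC Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (Desc.Legacy.Gen.u1DescType)
 *          ... code or data segment ...
 */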
208
209/**
210 * CPU exception classes.
211 */
212typedef enum IEMXCPTCLASS
213{
214 IEMXCPTCLASS_BENIGN,
215 IEMXCPTCLASS_CONTRIBUTORY,
216 IEMXCPTCLASS_PAGE_FAULT,
217 IEMXCPTCLASS_DOUBLE_FAULT
218} IEMXCPTCLASS;
219
220
221/*********************************************************************************************************************************
222* Defined Constants And Macros *
223*********************************************************************************************************************************/
224/** @def IEM_WITH_SETJMP
225 * Enables alternative status code handling using setjmps.
226 *
227 * This adds a bit of expense via the setjmp() call since it saves all the
228 * non-volatile registers. However, it eliminates return code checks and allows
229 * for more optimal return value passing (return regs instead of stack buffer).
230 */
231#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
232# define IEM_WITH_SETJMP
233#endif
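
/*
 * Rough illustration of what IEM_WITH_SETJMP changes (hypothetical fragment):
 * without it, every memory fetch returns a VBOXSTRICTRC that must be checked;
 * with it, the *Jmp fetch variants longjmp out of the instruction on failure
 * and can return the value directly:
 *
 *      -- status code style --
 *      uint32_t u32Value;
 *      VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, &u32Value, iEffSeg, GCPtrMem);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *
 *      -- setjmp style --
 *      uint32_t const u32Value = iemMemFetchDataU32Jmp(pVCpu, iEffSeg, GCPtrMem);
 */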
234
235/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
236 * due to GCC lacking knowledge about the value range of a switch. */
237#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
238
239/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
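
/*
 * Usage sketch (hypothetical switch): keeps GCC quiet about a default case it
 * cannot prove unreachable while still failing an assertion if it is ever hit:
 *
 *      switch (pVCpu->iem.s.enmEffOpSize)
 *      {
 *          case IEMMODE_16BIT: cbValue = 2; break;
 *          case IEMMODE_32BIT: cbValue = 4; break;
 *          case IEMMODE_64BIT: cbValue = 8; break;
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 */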
241
242/**
243 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
244 * occasion.
245 */
246#ifdef LOG_ENABLED
247# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
248 do { \
249 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
250 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
251 } while (0)
252#else
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
255#endif
256
257/**
258 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
259 * occasion using the supplied logger statement.
260 *
261 * @param a_LoggerArgs What to log on failure.
262 */
263#ifdef LOG_ENABLED
264# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
265 do { \
266 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
267 /*LogFunc(a_LoggerArgs);*/ \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
269 } while (0)
270#else
271# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
272 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
273#endif
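
/*
 * Usage sketch with made-up condition and variable names: unimplemented corner
 * cases bail out of the emulation with a logged reason instead of asserting:
 *
 *      if (fHypotheticalUnsupportedCase)
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("gate type %#x not implemented\n", uHypotheticalGateType));
 */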
274
275/**
276 * Call an opcode decoder function.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF.
280 */
281#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
282
283/**
284 * Call a common opcode decoder function taking one extra argument.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_1.
288 */
289#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
290
291/**
292 * Call a common opcode decoder function taking two extra arguments.
293 *
294 * We're using macros for this so that adding and removing parameters can be
295 * done as we please. See FNIEMOP_DEF_2.
296 */
297#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
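
/*
 * Illustrative sketch, with made-up names: an opcode decoder function is
 * declared with FNIEMOP_DEF and invoked through FNIEMOP_CALL so that the
 * calling convention and parameter list can be changed in one place:
 *
 *      FNIEMOP_DEF(iemOp_hypothetical_example)
 *      {
 *          uint8_t bRm;
 *          IEM_OPCODE_GET_NEXT_U8(&bRm);
 *          return FNIEMOP_CALL_1(iemOpHypotheticalWorker, bRm);
 *      }
 *
 * iemOp_hypothetical_example and iemOpHypotheticalWorker do not exist in the
 * sources; IEM_OPCODE_GET_NEXT_U8 is the regular opcode fetch helper used by
 * the decoder templates.
 */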
298
299/**
300 * Check if we're currently executing in real or virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The IEM state of the current CPU.
304 */
305#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in virtual 8086 mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in long mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Check if we're currently executing in a 64-bit code segment.
325 *
326 * @returns @c true if it is, @c false if not.
327 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
328 */
329#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
330
331/**
332 * Check if we're currently executing in real mode.
333 *
334 * @returns @c true if it is, @c false if not.
335 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
336 */
337#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
338
339/**
340 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
341 * @returns PCCPUMFEATURES
342 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
343 */
344#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
345
346/**
347 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
348 * @returns PCCPUMFEATURES
349 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
350 */
351#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
352
353/**
354 * Evaluates to true if we're presenting an Intel CPU to the guest.
355 */
356#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
357
358/**
359 * Evaluates to true if we're presenting an AMD CPU to the guest.
360 */
361#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
362
363/**
364 * Check if the address is canonical.
365 */
366#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
367
368/**
369 * Gets the effective VEX.VVVV value.
370 *
371 * The 4th bit is ignored when not in 64-bit code.
372 * @returns effective V-register value.
373 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
374 */
375#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
376 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
377
378/** @def IEM_USE_UNALIGNED_DATA_ACCESS
379 * Use unaligned accesses instead of elaborate byte assembly. */
380#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
381# define IEM_USE_UNALIGNED_DATA_ACCESS
382#endif
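
/*
 * Illustration only: with IEM_USE_UNALIGNED_DATA_ACCESS a value can simply be
 * read through a misaligned pointer (which x86/AMD64 hosts tolerate), instead
 * of being assembled byte by byte:
 *
 *      #ifdef IEM_USE_UNALIGNED_DATA_ACCESS
 *          uint32_t const u32 = *(uint32_t const *)pbSrc;
 *      #else
 *          uint32_t const u32 = RT_MAKE_U32_FROM_U8(pbSrc[0], pbSrc[1], pbSrc[2], pbSrc[3]);
 *      #endif
 *
 * pbSrc is a made-up name; RT_MAKE_U32_FROM_U8 is the IPRT byte-assembly macro.
 */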
383
384#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
385
386/**
387 * Check if the guest has entered VMX root operation.
388 */
389# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
390
391/**
392 * Check if the guest has entered VMX non-root operation.
393 */
394# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if the nested-guest has the given Pin-based VM-execution control set.
398 */
399# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
400 (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
401
402/**
403 * Check if the nested-guest has the given Processor-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
406 (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
407
408/**
409 * Check if the nested-guest has the given Secondary Processor-based VM-execution
410 * control set.
411 */
412# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
413 (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
414
415/**
416 * Invokes the VMX VM-exit handler for an instruction intercept.
417 */
418# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
419 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept where the
423 * instruction provides additional VM-exit information.
424 */
425# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
426 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
427
428/**
429 * Invokes the VMX VM-exit handler for a task switch.
430 */
431# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
432 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for MWAIT.
436 */
437# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
438 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for a triple fault.
442 */
443# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
444 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
445
446#else
447# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
448# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
449# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
450# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
451# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
452# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
453# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
454# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
455# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
456# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
457
458#endif
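
/*
 * Illustrative use of the helpers above (fragment with made-up surrounding
 * context): an instruction emulation typically checks for an intercept in VMX
 * non-root mode and triggers the corresponding VM-exit:
 *
 *      if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
 *          && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
 *          IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, cbInstr);
 *
 * When VBOX_WITH_NESTED_HWVIRT_VMX is not defined, the checks above compile to
 * (false) and the exit macros to internal-processing-error returns.
 */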
459
460#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
461/**
462 * Check if an SVM control/instruction intercept is set.
463 */
464# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
465 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
466
467/**
468 * Check if an SVM read CRx intercept is set.
469 */
470# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
471 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
472
473/**
474 * Check if an SVM write CRx intercept is set.
475 */
476# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM read DRx intercept is set.
481 */
482# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
483 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
484
485/**
486 * Check if an SVM write DRx intercept is set.
487 */
488# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM exception intercept is set.
493 */
494# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
495 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
496
497/**
498 * Invokes the SVM \#VMEXIT handler for the nested-guest.
499 */
500# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
501 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
502
503/**
504 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
505 * corresponding decode assist information.
506 */
507# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
508 do \
509 { \
510 uint64_t uExitInfo1; \
511 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
512 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
513 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
514 else \
515 uExitInfo1 = 0; \
516 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
517 } while (0)
518
519/** Checks and handles the SVM nested-guest instruction intercept and updates
520 * the NRIP if needed.
521 */
522# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
523 do \
524 { \
525 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
526 { \
527 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
528 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
529 } \
530 } while (0)
531
532/** Checks and handles SVM nested-guest CR0 read intercept. */
533# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
534 do \
535 { \
536 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
537 { /* probably likely */ } \
538 else \
539 { \
540 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
541 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
542 } \
543 } while (0)
544
545/**
546 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
547 */
548# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
549 do { \
550 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
551 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
552 } while (0)
553
554#else
555# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
556# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
557# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
558# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
559# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
560# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
561# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
562# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
563# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
564# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
565# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
566
567#endif
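
/*
 * Illustrative use of the helpers above (fragment, constants taken from the
 * SVM headers): checking and taking an instruction intercept, updating the
 * NRIP first when the CPU advertises NextRIP-save; the two zeros are
 * uExitInfo1 and uExitInfo2:
 *
 *      IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD,
 *                                    SVM_EXIT_WBINVD, 0, 0);
 *
 * With VBOX_WITH_NESTED_HWVIRT_SVM undefined this expands to an empty statement.
 */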
568
569
570/*********************************************************************************************************************************
571* Global Variables *
572*********************************************************************************************************************************/
573extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
574
575
576/** Function table for the ADD instruction. */
577IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
578{
579 iemAImpl_add_u8, iemAImpl_add_u8_locked,
580 iemAImpl_add_u16, iemAImpl_add_u16_locked,
581 iemAImpl_add_u32, iemAImpl_add_u32_locked,
582 iemAImpl_add_u64, iemAImpl_add_u64_locked
583};
584
585/** Function table for the ADC instruction. */
586IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
587{
588 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
589 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
590 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
591 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
592};
593
594/** Function table for the SUB instruction. */
595IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
596{
597 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
598 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
599 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
600 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
601};
602
603/** Function table for the SBB instruction. */
604IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
605{
606 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
607 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
608 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
609 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
610};
611
612/** Function table for the OR instruction. */
613IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
614{
615 iemAImpl_or_u8, iemAImpl_or_u8_locked,
616 iemAImpl_or_u16, iemAImpl_or_u16_locked,
617 iemAImpl_or_u32, iemAImpl_or_u32_locked,
618 iemAImpl_or_u64, iemAImpl_or_u64_locked
619};
620
621/** Function table for the XOR instruction. */
622IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
623{
624 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
625 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
626 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
627 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
628};
629
630/** Function table for the AND instruction. */
631IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
632{
633 iemAImpl_and_u8, iemAImpl_and_u8_locked,
634 iemAImpl_and_u16, iemAImpl_and_u16_locked,
635 iemAImpl_and_u32, iemAImpl_and_u32_locked,
636 iemAImpl_and_u64, iemAImpl_and_u64_locked
637};
638
639/** Function table for the CMP instruction.
640 * @remarks Making operand order ASSUMPTIONS.
641 */
642IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
643{
644 iemAImpl_cmp_u8, NULL,
645 iemAImpl_cmp_u16, NULL,
646 iemAImpl_cmp_u32, NULL,
647 iemAImpl_cmp_u64, NULL
648};
649
650/** Function table for the TEST instruction.
651 * @remarks Making operand order ASSUMPTIONS.
652 */
653IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
654{
655 iemAImpl_test_u8, NULL,
656 iemAImpl_test_u16, NULL,
657 iemAImpl_test_u32, NULL,
658 iemAImpl_test_u64, NULL
659};
660
661/** Function table for the BT instruction. */
662IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
663{
664 NULL, NULL,
665 iemAImpl_bt_u16, NULL,
666 iemAImpl_bt_u32, NULL,
667 iemAImpl_bt_u64, NULL
668};
669
670/** Function table for the BTC instruction. */
671IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
672{
673 NULL, NULL,
674 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
675 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
676 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
677};
678
679/** Function table for the BTR instruction. */
680IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
681{
682 NULL, NULL,
683 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
684 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
685 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
686};
687
688/** Function table for the BTS instruction. */
689IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
690{
691 NULL, NULL,
692 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
693 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
694 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
695};
696
697/** Function table for the BSF instruction. */
698IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
699{
700 NULL, NULL,
701 iemAImpl_bsf_u16, NULL,
702 iemAImpl_bsf_u32, NULL,
703 iemAImpl_bsf_u64, NULL
704};
705
706/** Function table for the BSR instruction. */
707IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
708{
709 NULL, NULL,
710 iemAImpl_bsr_u16, NULL,
711 iemAImpl_bsr_u32, NULL,
712 iemAImpl_bsr_u64, NULL
713};
714
715/** Function table for the IMUL instruction. */
716IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
717{
718 NULL, NULL,
719 iemAImpl_imul_two_u16, NULL,
720 iemAImpl_imul_two_u32, NULL,
721 iemAImpl_imul_two_u64, NULL
722};
723
724/** Group 1 /r lookup table. */
725IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
726{
727 &g_iemAImpl_add,
728 &g_iemAImpl_or,
729 &g_iemAImpl_adc,
730 &g_iemAImpl_sbb,
731 &g_iemAImpl_and,
732 &g_iemAImpl_sub,
733 &g_iemAImpl_xor,
734 &g_iemAImpl_cmp
735};
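
/*
 * Dispatch sketch (simplified, made-up fragment): each table row above pairs
 * the plain and LOCK-prefixed worker for one operand size, and g_apIemImplGrp1
 * selects between ADD/OR/ADC/SBB/AND/SUB/XOR/CMP using the ModR/M reg field:
 *
 *      PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK];
 *
 * The IEMOPBINSIZES layout itself lives in IEMInternal.h.
 */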
736
737/** Function table for the INC instruction. */
738IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
739{
740 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
741 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
742 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
743 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
744};
745
746/** Function table for the DEC instruction. */
747IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
748{
749 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
750 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
751 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
752 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
753};
754
755/** Function table for the NEG instruction. */
756IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
757{
758 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
759 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
760 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
761 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
762};
763
764/** Function table for the NOT instruction. */
765IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
766{
767 iemAImpl_not_u8, iemAImpl_not_u8_locked,
768 iemAImpl_not_u16, iemAImpl_not_u16_locked,
769 iemAImpl_not_u32, iemAImpl_not_u32_locked,
770 iemAImpl_not_u64, iemAImpl_not_u64_locked
771};
772
773
774/** Function table for the ROL instruction. */
775IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
776{
777 iemAImpl_rol_u8,
778 iemAImpl_rol_u16,
779 iemAImpl_rol_u32,
780 iemAImpl_rol_u64
781};
782
783/** Function table for the ROR instruction. */
784IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
785{
786 iemAImpl_ror_u8,
787 iemAImpl_ror_u16,
788 iemAImpl_ror_u32,
789 iemAImpl_ror_u64
790};
791
792/** Function table for the RCL instruction. */
793IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
794{
795 iemAImpl_rcl_u8,
796 iemAImpl_rcl_u16,
797 iemAImpl_rcl_u32,
798 iemAImpl_rcl_u64
799};
800
801/** Function table for the RCR instruction. */
802IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
803{
804 iemAImpl_rcr_u8,
805 iemAImpl_rcr_u16,
806 iemAImpl_rcr_u32,
807 iemAImpl_rcr_u64
808};
809
810/** Function table for the SHL instruction. */
811IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
812{
813 iemAImpl_shl_u8,
814 iemAImpl_shl_u16,
815 iemAImpl_shl_u32,
816 iemAImpl_shl_u64
817};
818
819/** Function table for the SHR instruction. */
820IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
821{
822 iemAImpl_shr_u8,
823 iemAImpl_shr_u16,
824 iemAImpl_shr_u32,
825 iemAImpl_shr_u64
826};
827
828/** Function table for the SAR instruction. */
829IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
830{
831 iemAImpl_sar_u8,
832 iemAImpl_sar_u16,
833 iemAImpl_sar_u32,
834 iemAImpl_sar_u64
835};
836
837
838/** Function table for the MUL instruction. */
839IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
840{
841 iemAImpl_mul_u8,
842 iemAImpl_mul_u16,
843 iemAImpl_mul_u32,
844 iemAImpl_mul_u64
845};
846
847/** Function table for the IMUL instruction working implicitly on rAX. */
848IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
849{
850 iemAImpl_imul_u8,
851 iemAImpl_imul_u16,
852 iemAImpl_imul_u32,
853 iemAImpl_imul_u64
854};
855
856/** Function table for the DIV instruction. */
857IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
858{
859 iemAImpl_div_u8,
860 iemAImpl_div_u16,
861 iemAImpl_div_u32,
862 iemAImpl_div_u64
863};
864
865/** Function table for the IDIV instruction. */
866IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
867{
868 iemAImpl_idiv_u8,
869 iemAImpl_idiv_u16,
870 iemAImpl_idiv_u32,
871 iemAImpl_idiv_u64
872};
873
874/** Function table for the SHLD instruction */
875IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
876{
877 iemAImpl_shld_u16,
878 iemAImpl_shld_u32,
879 iemAImpl_shld_u64,
880};
881
882/** Function table for the SHRD instruction */
883IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
884{
885 iemAImpl_shrd_u16,
886 iemAImpl_shrd_u32,
887 iemAImpl_shrd_u64,
888};
889
890
891/** Function table for the PUNPCKLBW instruction */
892IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
893/** Function table for the PUNPCKLWD instruction */
894IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
895/** Function table for the PUNPCKLDQ instruction */
896IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
897/** Function table for the PUNPCKLQDQ instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
899
900/** Function table for the PUNPCKHBW instruction */
901IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
902/** Function table for the PUNPCKHWD instruction */
903IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
904/** Function table for the PUNPCKHDQ instruction */
905IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
906/** Function table for the PUNPCKHQDQ instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
908
909/** Function table for the PXOR instruction */
910IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
911/** Function table for the PCMPEQB instruction */
912IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
913/** Function table for the PCMPEQW instruction */
914IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
915/** Function table for the PCMPEQD instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
917
918
919#if defined(IEM_LOG_MEMORY_WRITES)
920/** What IEM just wrote. */
921uint8_t g_abIemWrote[256];
922/** How much IEM just wrote. */
923size_t g_cbIemWrote;
924#endif
925
926
927/*********************************************************************************************************************************
928* Internal Functions *
929*********************************************************************************************************************************/
930IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr);
931IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu);
932IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu);
933IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel);
934/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
935IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
936IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
938IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
939IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr);
940IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu);
941IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
943IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel);
944IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
945IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
946IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu);
947#ifdef IEM_WITH_SETJMP
948DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
949DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu);
950DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
951DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel);
952DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
953#endif
954
955IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
956IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess);
957IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
958IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
959IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
960IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
961IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
962IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
963IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
966IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp);
967IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
968IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value);
969IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value);
970IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel);
971DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg);
972DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg);
973
974#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
975IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
976IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
977IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
978IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu);
979IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
980IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value);
981IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value);
982#endif
983
984#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
985IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
986IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
987#endif
988
989
990/**
991 * Sets the pass up status.
992 *
993 * @returns VINF_SUCCESS.
994 * @param pVCpu The cross context virtual CPU structure of the
995 * calling thread.
996 * @param rcPassUp The pass up status. Must be informational.
997 * VINF_SUCCESS is not allowed.
998 */
999IEM_STATIC int iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp)
1000{
1001 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1002
1003 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1004 if (rcOldPassUp == VINF_SUCCESS)
1005 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1006 /* If both are EM scheduling codes, use EM priority rules. */
1007 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1008 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1009 {
1010 if (rcPassUp < rcOldPassUp)
1011 {
1012 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1013 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1014 }
1015 else
1016 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1017 }
1018 /* Override EM scheduling with specific status code. */
1019 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1020 {
1021 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1022 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1023 }
1024 /* Don't override specific status code, first come first served. */
1025 else
1026 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1027 return VINF_SUCCESS;
1028}
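
/*
 * Worked example of the rules above (informational): if the first recorded
 * status is a specific informational code (say VINF_IOM_R3_MMIO_WRITE), it is
 * kept no matter what follows; if both the old and the new status are EM
 * scheduling codes, the numerically smaller (higher priority) one wins; and a
 * specific code arriving after an EM scheduling code replaces it.
 */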
1029
1030
1031/**
1032 * Calculates the CPU mode.
1033 *
1034 * This is mainly for updating IEMCPU::enmCpuMode.
1035 *
1036 * @returns CPU mode.
1037 * @param pVCpu The cross context virtual CPU structure of the
1038 * calling thread.
1039 */
1040DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu)
1041{
1042 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1043 return IEMMODE_64BIT;
1044 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1045 return IEMMODE_32BIT;
1046 return IEMMODE_16BIT;
1047}
1048
1049
1050/**
1051 * Initializes the execution state.
1052 *
1053 * @param pVCpu The cross context virtual CPU structure of the
1054 * calling thread.
1055 * @param fBypassHandlers Whether to bypass access handlers.
1056 *
1057 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1058 * side-effects in strict builds.
1059 */
1060DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers)
1061{
1062 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1063 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1064 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1065 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1066 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1067 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1068 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1069 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1070 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1072
1073 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1074 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1075#ifdef VBOX_STRICT
1076 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1077 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1078 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1079 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1080 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1081 pVCpu->iem.s.uRexReg = 127;
1082 pVCpu->iem.s.uRexB = 127;
1083 pVCpu->iem.s.offModRm = 127;
1084 pVCpu->iem.s.uRexIndex = 127;
1085 pVCpu->iem.s.iEffSeg = 127;
1086 pVCpu->iem.s.idxPrefix = 127;
1087 pVCpu->iem.s.uVex3rdReg = 127;
1088 pVCpu->iem.s.uVexLength = 127;
1089 pVCpu->iem.s.fEvexStuff = 127;
1090 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1091# ifdef IEM_WITH_CODE_TLB
1092 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1093 pVCpu->iem.s.pbInstrBuf = NULL;
1094 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1095 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1096 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1097 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1098# else
1099 pVCpu->iem.s.offOpcode = 127;
1100 pVCpu->iem.s.cbOpcode = 127;
1101# endif
1102#endif
1103
1104 pVCpu->iem.s.cActiveMappings = 0;
1105 pVCpu->iem.s.iNextMapping = 0;
1106 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1107 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1108#if 0
1109#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1110 if ( CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
1111 && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
1112 {
1113 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1114 Assert(pVmcs);
1115 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
1116 if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
1117 {
1118 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
1119 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
1120 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
1121 AssertRC(rc);
1122 }
1123 }
1124#endif
1125#endif
1126}
1127
1128#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1129/**
1130 * Performs a minimal reinitialization of the execution state.
1131 *
1132 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1133 * 'world-switch' type operations on the CPU. Currently only nested
1134 * hardware-virtualization uses it.
1135 *
1136 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1137 */
1138IEM_STATIC void iemReInitExec(PVMCPUCC pVCpu)
1139{
1140 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1141 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1142
1143 pVCpu->iem.s.uCpl = uCpl;
1144 pVCpu->iem.s.enmCpuMode = enmMode;
1145 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1146 pVCpu->iem.s.enmEffAddrMode = enmMode;
1147 if (enmMode != IEMMODE_64BIT)
1148 {
1149 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1150 pVCpu->iem.s.enmEffOpSize = enmMode;
1151 }
1152 else
1153 {
1154 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1155 pVCpu->iem.s.enmEffOpSize = enmMode;
1156 }
1157 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1158#ifndef IEM_WITH_CODE_TLB
1159 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1160 pVCpu->iem.s.offOpcode = 0;
1161 pVCpu->iem.s.cbOpcode = 0;
1162#endif
1163 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1164}
1165#endif
1166
1167/**
1168 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1169 *
1170 * @param pVCpu The cross context virtual CPU structure of the
1171 * calling thread.
1172 */
1173DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu)
1174{
1175 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1176#ifdef VBOX_STRICT
1177# ifdef IEM_WITH_CODE_TLB
1178 NOREF(pVCpu);
1179# else
1180 pVCpu->iem.s.cbOpcode = 0;
1181# endif
1182#else
1183 NOREF(pVCpu);
1184#endif
1185}
1186
1187
1188/**
1189 * Initializes the decoder state.
1190 *
1191 * iemReInitDecoder is mostly a copy of this function.
1192 *
1193 * @param pVCpu The cross context virtual CPU structure of the
1194 * calling thread.
1195 * @param fBypassHandlers Whether to bypass access handlers.
1196 * @param fDisregardLock Whether to disregard the LOCK prefix.
1197 */
1198DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1199{
1200 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1201 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1209 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1210
1211 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1212 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1213 pVCpu->iem.s.enmCpuMode = enmMode;
1214 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1215 pVCpu->iem.s.enmEffAddrMode = enmMode;
1216 if (enmMode != IEMMODE_64BIT)
1217 {
1218 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1219 pVCpu->iem.s.enmEffOpSize = enmMode;
1220 }
1221 else
1222 {
1223 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1224 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1225 }
1226 pVCpu->iem.s.fPrefixes = 0;
1227 pVCpu->iem.s.uRexReg = 0;
1228 pVCpu->iem.s.uRexB = 0;
1229 pVCpu->iem.s.uRexIndex = 0;
1230 pVCpu->iem.s.idxPrefix = 0;
1231 pVCpu->iem.s.uVex3rdReg = 0;
1232 pVCpu->iem.s.uVexLength = 0;
1233 pVCpu->iem.s.fEvexStuff = 0;
1234 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1235#ifdef IEM_WITH_CODE_TLB
1236 pVCpu->iem.s.pbInstrBuf = NULL;
1237 pVCpu->iem.s.offInstrNextByte = 0;
1238 pVCpu->iem.s.offCurInstrStart = 0;
1239# ifdef VBOX_STRICT
1240 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1241 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1242 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1243# endif
1244#else
1245 pVCpu->iem.s.offOpcode = 0;
1246 pVCpu->iem.s.cbOpcode = 0;
1247#endif
1248 pVCpu->iem.s.offModRm = 0;
1249 pVCpu->iem.s.cActiveMappings = 0;
1250 pVCpu->iem.s.iNextMapping = 0;
1251 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1252 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1253 pVCpu->iem.s.fDisregardLock = fDisregardLock;
1254
1255#ifdef DBGFTRACE_ENABLED
1256 switch (enmMode)
1257 {
1258 case IEMMODE_64BIT:
1259 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1260 break;
1261 case IEMMODE_32BIT:
1262 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1263 break;
1264 case IEMMODE_16BIT:
1265 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1266 break;
1267 }
1268#endif
1269}
1270
1271
1272/**
1273 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1274 *
1275 * This is mostly a copy of iemInitDecoder.
1276 *
1277 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1278 */
1279DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
1280{
1281 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1282 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1283 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1284 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1285 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1286 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1287 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1288 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1289 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1290
1291 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1292 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1293 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1294 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1295 pVCpu->iem.s.enmEffAddrMode = enmMode;
1296 if (enmMode != IEMMODE_64BIT)
1297 {
1298 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1299 pVCpu->iem.s.enmEffOpSize = enmMode;
1300 }
1301 else
1302 {
1303 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1304 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1305 }
1306 pVCpu->iem.s.fPrefixes = 0;
1307 pVCpu->iem.s.uRexReg = 0;
1308 pVCpu->iem.s.uRexB = 0;
1309 pVCpu->iem.s.uRexIndex = 0;
1310 pVCpu->iem.s.idxPrefix = 0;
1311 pVCpu->iem.s.uVex3rdReg = 0;
1312 pVCpu->iem.s.uVexLength = 0;
1313 pVCpu->iem.s.fEvexStuff = 0;
1314 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1315#ifdef IEM_WITH_CODE_TLB
1316 if (pVCpu->iem.s.pbInstrBuf)
1317 {
1318 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1319 - pVCpu->iem.s.uInstrBufPc;
1320 if (off < pVCpu->iem.s.cbInstrBufTotal)
1321 {
1322 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1323 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1324 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1325 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1326 else
1327 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1328 }
1329 else
1330 {
1331 pVCpu->iem.s.pbInstrBuf = NULL;
1332 pVCpu->iem.s.offInstrNextByte = 0;
1333 pVCpu->iem.s.offCurInstrStart = 0;
1334 pVCpu->iem.s.cbInstrBuf = 0;
1335 pVCpu->iem.s.cbInstrBufTotal = 0;
1336 }
1337 }
1338 else
1339 {
1340 pVCpu->iem.s.offInstrNextByte = 0;
1341 pVCpu->iem.s.offCurInstrStart = 0;
1342 pVCpu->iem.s.cbInstrBuf = 0;
1343 pVCpu->iem.s.cbInstrBufTotal = 0;
1344 }
1345#else
1346 pVCpu->iem.s.cbOpcode = 0;
1347 pVCpu->iem.s.offOpcode = 0;
1348#endif
1349 pVCpu->iem.s.offModRm = 0;
1350 Assert(pVCpu->iem.s.cActiveMappings == 0);
1351 pVCpu->iem.s.iNextMapping = 0;
1352 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1353 Assert(pVCpu->iem.s.fBypassHandlers == false);
1354
1355#ifdef DBGFTRACE_ENABLED
1356 switch (enmMode)
1357 {
1358 case IEMMODE_64BIT:
1359 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1360 break;
1361 case IEMMODE_32BIT:
1362 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1363 break;
1364 case IEMMODE_16BIT:
1365 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1366 break;
1367 }
1368#endif
1369}
1370
1371
1372
1373/**
1374 * Prefetches opcodes the first time we start executing.
1375 *
1376 * @returns Strict VBox status code.
1377 * @param pVCpu The cross context virtual CPU structure of the
1378 * calling thread.
1379 * @param fBypassHandlers Whether to bypass access handlers.
1380 * @param fDisregardLock Whether to disregard LOCK prefixes.
1381 *
1382 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
1383 * store them as such.
1384 */
1385IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1386{
1387 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
1388
1389#ifdef IEM_WITH_CODE_TLB
1390 /** @todo Do ITLB lookup here. */
1391
1392#else /* !IEM_WITH_CODE_TLB */
1393
1394 /*
1395 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1396 *
1397 * First translate CS:rIP to a physical address.
1398 */
1399 uint32_t cbToTryRead;
1400 RTGCPTR GCPtrPC;
1401 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1402 {
1403 cbToTryRead = PAGE_SIZE;
1404 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1405 if (IEM_IS_CANONICAL(GCPtrPC))
1406 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1407 else
1408 return iemRaiseGeneralProtectionFault0(pVCpu);
1409 }
1410 else
1411 {
1412 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1413 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1414 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1415 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1416 else
1417 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1418 if (cbToTryRead) { /* likely */ }
1419 else /* overflowed */
1420 {
1421 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1422 cbToTryRead = UINT32_MAX;
1423 }
1424 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1425 Assert(GCPtrPC <= UINT32_MAX);
1426 }
1427
1428 RTGCPHYS GCPhys;
1429 uint64_t fFlags;
1430 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &fFlags, &GCPhys);
1431 if (RT_SUCCESS(rc)) { /* probable */ }
1432 else
1433 {
1434 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1435 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1436 }
1437 if ((fFlags & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1438 else
1439 {
1440 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1441 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1442 }
1443 if (!(fFlags & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1444 else
1445 {
1446 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1447 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1448 }
1449 GCPhys |= GCPtrPC & PAGE_OFFSET_MASK;
1450 /** @todo Check reserved bits and such stuff. PGM is better at doing
1451 * that, so do it when implementing the guest virtual address
1452 * TLB... */
1453
1454 /*
1455 * Read the bytes at this address.
1456 */
1457 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1458 if (cbToTryRead > cbLeftOnPage)
1459 cbToTryRead = cbLeftOnPage;
1460 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1461 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1462
1463 if (!pVCpu->iem.s.fBypassHandlers)
1464 {
1465 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1466 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1467 { /* likely */ }
1468 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1469 {
1470 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1471 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1472 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1473 }
1474 else
1475 {
1476 Log((RT_SUCCESS(rcStrict)
1477 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1478 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1479 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1480 return rcStrict;
1481 }
1482 }
1483 else
1484 {
1485 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1486 if (RT_SUCCESS(rc))
1487 { /* likely */ }
1488 else
1489 {
1490 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1491 GCPtrPC, GCPhys, cbToTryRead, rc));
1492 return rc;
1493 }
1494 }
1495 pVCpu->iem.s.cbOpcode = cbToTryRead;
1496#endif /* !IEM_WITH_CODE_TLB */
1497 return VINF_SUCCESS;
1498}
1499
1500
1501/**
1502 * Invalidates the IEM TLBs.
1503 *
1504 * This is called internally as well as by PGM when moving GC mappings.
1505 *
1507 * @param pVCpu The cross context virtual CPU structure of the calling
1508 * thread.
1509 * @param fVmm Set when PGM calls us with a remapping.
1510 */
1511VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu, bool fVmm)
1512{
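    /*
     * Invalidation works by bumping the TLB revision rather than clearing the
     * entries: each entry's uTag embeds the revision it was filled under (see
     * the tag construction in iemOpcodeFetchBytesJmp), so after the bump no
     * stale entry can match.  Only when the incremented revision wraps back to
     * zero do we need to zero all the tags explicitly.
     */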
1513#ifdef IEM_WITH_CODE_TLB
1514 pVCpu->iem.s.cbInstrBufTotal = 0;
1515 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1516 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1517 { /* very likely */ }
1518 else
1519 {
1520 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1521 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1522 while (i-- > 0)
1523 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1524 }
1525#endif
1526
1527#ifdef IEM_WITH_DATA_TLB
1528 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1529 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1530 { /* very likely */ }
1531 else
1532 {
1533 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1534 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1535 while (i-- > 0)
1536 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1537 }
1538#endif
1539 NOREF(pVCpu); NOREF(fVmm);
1540}
1541
1542
1543/**
1544 * Invalidates a page in the TLBs.
1545 *
1546 * @param pVCpu The cross context virtual CPU structure of the calling
1547 * thread.
1548 * @param GCPtr The address of the page to invalidate
1549 */
1550VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1551{
1552#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1553 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1554 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1555 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1556 uintptr_t idx = (uint8_t)GCPtr;
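    /* Both TLBs are direct mapped with 256 entries, indexed by the low 8 bits of
       the page number; the tags compared below combine the page number with the
       current revision, so only entries filled since the last full invalidation
       can match. */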
1557
1558# ifdef IEM_WITH_CODE_TLB
1559 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1560 {
1561 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1562 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1563 pVCpu->iem.s.cbInstrBufTotal = 0;
1564 }
1565# endif
1566
1567# ifdef IEM_WITH_DATA_TLB
1568 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1569 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1570# endif
1571#else
1572 NOREF(pVCpu); NOREF(GCPtr);
1573#endif
1574}
1575
1576
1577/**
1578 * Invalidates the host physical aspects of the IEM TLBs.
1579 *
1580 * This is called internally as well as by PGM when moving GC mappings.
1581 *
1582 * @param pVCpu The cross context virtual CPU structure of the calling
1583 * thread.
1584 */
1585VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
1586{
1587#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1588 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1589
1590# ifdef IEM_WITH_CODE_TLB
1591 pVCpu->iem.s.cbInstrBufTotal = 0;
1592# endif
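    /* Same revision trick as IEMTlbInvalidateAll, applied to the physical side:
       entries carry the physical revision in fFlagsAndPhysRev, so bumping
       uTlbPhysRev forces the next use of an entry to re-query PGM for the
       mapping and read/write info.  Only on wrap-around are the cached
       pbMappingR3 pointers and flags scrubbed explicitly. */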
1593 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1594 if (uTlbPhysRev != 0)
1595 {
1596 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1597 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1598 }
1599 else
1600 {
1601 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1602 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1603
1604 unsigned i;
1605# ifdef IEM_WITH_CODE_TLB
1606 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1607 while (i-- > 0)
1608 {
1609 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1610 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1611 }
1612# endif
1613# ifdef IEM_WITH_DATA_TLB
1614 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1615 while (i-- > 0)
1616 {
1617 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1618 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1619 }
1620# endif
1621 }
1622#else
1623 NOREF(pVCpu);
1624#endif
1625}
1626
1627
1628/**
1629 * Invalidates the host physical aspects of the IEM TLBs.
1630 *
1631 * This is called internally as well as by PGM when moving GC mappings.
1632 *
1633 * @param pVM The cross context VM structure.
1634 *
1635 * @remarks Caller holds the PGM lock.
1636 */
1637VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1638{
1639 RT_NOREF_PV(pVM);
1640}
1641
1642#ifdef IEM_WITH_CODE_TLB
1643
1644/**
1645 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception on
1646 * failure (longjmp, no return).
1647 *
1648 * We end up here for a number of reasons:
1649 * - pbInstrBuf isn't yet initialized.
1650 * - Advancing beyond the buffer boundary (e.g. crossing a page).
1651 * - Advancing beyond the CS segment limit.
1652 * - Fetching from a non-mappable page (e.g. MMIO).
1653 *
1654 * @param pVCpu The cross context virtual CPU structure of the
1655 * calling thread.
1656 * @param pvDst Where to return the bytes.
1657 * @param cbDst Number of bytes to read.
1658 *
1659 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1660 */
1661IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst)
1662{
1663#ifdef IN_RING3
1664 for (;;)
1665 {
1666 Assert(cbDst <= 8);
1667 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1668
1669 /*
1670 * We might have a partial buffer match, deal with that first to make the
1671 * rest simpler. This is the first part of the cross page/buffer case.
1672 */
1673 if (pVCpu->iem.s.pbInstrBuf != NULL)
1674 {
1675 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1676 {
1677 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1678 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1679 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1680
1681 cbDst -= cbCopy;
1682 pvDst = (uint8_t *)pvDst + cbCopy;
1683 offBuf += cbCopy;
1684 pVCpu->iem.s.offInstrNextByte = offBuf;
1685 }
1686 }
1687
1688 /*
1689 * Check segment limit, figuring how much we're allowed to access at this point.
1690 *
1691 * We will fault immediately if RIP is past the segment limit / in non-canonical
1692 * territory. If we do continue, there are one or more bytes to read before we
1693 * end up in trouble and we need to do that first before faulting.
1694 */
1695 RTGCPTR GCPtrFirst;
1696 uint32_t cbMaxRead;
1697 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1698 {
1699 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1700 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1701 { /* likely */ }
1702 else
1703 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1704 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1705 }
1706 else
1707 {
1708 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1709 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1710 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1711 { /* likely */ }
1712 else
1713 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1714 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1715 if (cbMaxRead != 0)
1716 { /* likely */ }
1717 else
1718 {
1719 /* Overflowed because address is 0 and limit is max. */
1720 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1721 cbMaxRead = X86_PAGE_SIZE;
1722 }
1723 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1724 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1725 if (cbMaxRead2 < cbMaxRead)
1726 cbMaxRead = cbMaxRead2;
1727 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1728 }
1729
1730 /*
1731 * Get the TLB entry for this piece of code.
1732 */
1733 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1734 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1735 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1736 if (pTlbe->uTag == uTag)
1737 {
1738 /* likely when executing lots of code, otherwise unlikely */
1739# ifdef VBOX_WITH_STATISTICS
1740 pVCpu->iem.s.CodeTlb.cTlbHits++;
1741# endif
1742 }
1743 else
1744 {
1745 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1746 RTGCPHYS GCPhys;
1747 uint64_t fFlags;
1748 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &fFlags, &GCPhys);
1749 if (RT_FAILURE(rc))
1750 {
1751 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1752 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1753 }
1754
1755 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1756 pTlbe->uTag = uTag;
1757 pTlbe->fFlagsAndPhysRev = (~fFlags & (X86_PTE_US | X86_PTE_RW | X86_PTE_D)) | (fFlags >> X86_PTE_PAE_BIT_NX);
1758 pTlbe->GCPhys = GCPhys;
1759 pTlbe->pbMappingR3 = NULL;
1760 }
1761
1762 /*
1763 * Check TLB page table level access flags.
1764 */
1765 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1766 {
1767 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1768 {
1769 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1770 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1771 }
1772 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1773 {
1774 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1775 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1776 }
1777 }
1778
1779 /*
1780 * Look up the physical page info if necessary.
1781 */
1782 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1783 { /* not necessary */ }
1784 else
1785 {
1786 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1787 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1788 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1789 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1790 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1791 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1792 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1793 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1794 }
1795
1796# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1797 /*
1798 * Try do a direct read using the pbMappingR3 pointer.
1799 */
1800 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1801 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1802 {
1803 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1804 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1805 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1806 {
1807 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1808 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1809 }
1810 else
1811 {
1812 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1813 Assert(cbInstr < cbMaxRead);
1814 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1815 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1816 }
1817 if (cbDst <= cbMaxRead)
1818 {
1819 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1820 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1821 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1822 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1823 return;
1824 }
1825 pVCpu->iem.s.pbInstrBuf = NULL;
1826
1827 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1828 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1829 }
1830 else
1831# endif
1832#if 0
1833 /*
1834 * If there is no special read handling, we can read a bit more and
1835 * put it in the prefetch buffer.
1836 */
1837 if ( cbDst < cbMaxRead
1838 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1839 {
1840 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1841 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1842 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1843 { /* likely */ }
1844 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1845 {
1846 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1847 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1848 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1849 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1850 }
1851 else
1852 {
1853 Log((RT_SUCCESS(rcStrict)
1854 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1855 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1856 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1857 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1858 }
1859 }
1860 /*
1861 * Special read handling, so only read exactly what's needed.
1862 * This is a highly unlikely scenario.
1863 */
1864 else
1865#endif
1866 {
1867 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1868 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1869 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1870 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1871 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1872 { /* likely */ }
1873 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1874 {
1875 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1876 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1877 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1878 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1879 }
1880 else
1881 {
1882 Log((RT_SUCCESS(rcStrict)
1883 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1884 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1885 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1886 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1887 }
1888 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1889 if (cbToRead == cbDst)
1890 return;
1891 }
1892
1893 /*
1894 * More to read, loop.
1895 */
1896 cbDst -= cbMaxRead;
1897 pvDst = (uint8_t *)pvDst + cbMaxRead;
1898 }
1899#else
1900 RT_NOREF(pvDst, cbDst);
1901 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1902#endif
1903}
1904
1905#else
1906
1907/**
1908 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1909 * exception if it fails.
1910 *
1911 * @returns Strict VBox status code.
1912 * @param pVCpu The cross context virtual CPU structure of the
1913 * calling thread.
1914 * @param cbMin The minimum number of bytes relative to offOpcode
1915 * that must be read.
1916 */
1917IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin)
1918{
1919 /*
1920 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1921 *
1922 * First translate CS:rIP to a physical address.
1923 */
1924 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1925 uint32_t cbToTryRead;
1926 RTGCPTR GCPtrNext;
1927 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1928 {
1929 cbToTryRead = PAGE_SIZE;
1930 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1931 if (!IEM_IS_CANONICAL(GCPtrNext))
1932 return iemRaiseGeneralProtectionFault0(pVCpu);
1933 }
1934 else
1935 {
1936 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1937 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1938 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1939 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1940 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1941 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1942 if (!cbToTryRead) /* overflowed */
1943 {
1944 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1945 cbToTryRead = UINT32_MAX;
1946 /** @todo check out wrapping around the code segment. */
1947 }
1948 if (cbToTryRead < cbMin - cbLeft)
1949 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1950 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1951 }
1952
1953 /* Only read up to the end of the page, and make sure we don't read more
1954 than the opcode buffer can hold. */
1955 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1956 if (cbToTryRead > cbLeftOnPage)
1957 cbToTryRead = cbLeftOnPage;
1958 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1959 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1960/** @todo r=bird: Convert assertion into undefined opcode exception? */
1961 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1962
1963 RTGCPHYS GCPhys;
1964 uint64_t fFlags;
1965 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &fFlags, &GCPhys);
1966 if (RT_FAILURE(rc))
1967 {
1968 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1969 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1970 }
1971 if (!(fFlags & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1972 {
1973 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1974 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1975 }
1976 if ((fFlags & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1977 {
1978 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1979 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1980 }
1981 GCPhys |= GCPtrNext & PAGE_OFFSET_MASK;
1982 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1983 /** @todo Check reserved bits and such stuff. PGM is better at doing
1984 * that, so do it when implementing the guest virtual address
1985 * TLB... */
1986
1987 /*
1988 * Read the bytes at this address.
1989 *
1990 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1991 * and since PATM should only patch the start of an instruction there
1992 * should be no need to check again here.
1993 */
1994 if (!pVCpu->iem.s.fBypassHandlers)
1995 {
1996 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1997 cbToTryRead, PGMACCESSORIGIN_IEM);
1998 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1999 { /* likely */ }
2000 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2001 {
2002 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2003 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2004 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2005 }
2006 else
2007 {
2008 Log((RT_SUCCESS(rcStrict)
2009 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2010 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2011 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2012 return rcStrict;
2013 }
2014 }
2015 else
2016 {
2017 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2018 if (RT_SUCCESS(rc))
2019 { /* likely */ }
2020 else
2021 {
2022 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2023 return rc;
2024 }
2025 }
2026 pVCpu->iem.s.cbOpcode += cbToTryRead;
2027 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2028
2029 return VINF_SUCCESS;
2030}
2031
2032#endif /* !IEM_WITH_CODE_TLB */
2033#ifndef IEM_WITH_SETJMP
2034
2035/**
2036 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2037 *
2038 * @returns Strict VBox status code.
2039 * @param pVCpu The cross context virtual CPU structure of the
2040 * calling thread.
2041 * @param pb Where to return the opcode byte.
2042 */
2043DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb)
2044{
2045 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2046 if (rcStrict == VINF_SUCCESS)
2047 {
2048 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2049 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2050 pVCpu->iem.s.offOpcode = offOpcode + 1;
2051 }
2052 else
2053 *pb = 0;
2054 return rcStrict;
2055}
2056
2057
2058/**
2059 * Fetches the next opcode byte.
2060 *
2061 * @returns Strict VBox status code.
2062 * @param pVCpu The cross context virtual CPU structure of the
2063 * calling thread.
2064 * @param pu8 Where to return the opcode byte.
2065 */
2066DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
2067{
2068 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2069 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2070 {
2071 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2072 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2073 return VINF_SUCCESS;
2074 }
2075 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2076}
2077
2078#else /* IEM_WITH_SETJMP */
2079
2080/**
2081 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2082 *
2083 * @returns The opcode byte.
2084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2085 */
2086DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu)
2087{
2088# ifdef IEM_WITH_CODE_TLB
2089 uint8_t u8;
2090 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2091 return u8;
2092# else
2093 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2094 if (rcStrict == VINF_SUCCESS)
2095 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2096 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2097# endif
2098}
2099
2100
2101/**
2102 * Fetches the next opcode byte, longjmp on error.
2103 *
2104 * @returns The opcode byte.
2105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2106 */
2107DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu)
2108{
2109# ifdef IEM_WITH_CODE_TLB
2110 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2111 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2112 if (RT_LIKELY( pbBuf != NULL
2113 && offBuf < pVCpu->iem.s.cbInstrBuf))
2114 {
2115 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2116 return pbBuf[offBuf];
2117 }
2118# else
2119 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2120 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2121 {
2122 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2123 return pVCpu->iem.s.abOpcode[offOpcode];
2124 }
2125# endif
2126 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2127}
2128
2129#endif /* IEM_WITH_SETJMP */
2130
2131/**
2132 * Fetches the next opcode byte, returns automatically on failure.
2133 *
2134 * @param a_pu8 Where to return the opcode byte.
2135 * @remark Implicitly references pVCpu.
2136 */
2137#ifndef IEM_WITH_SETJMP
2138# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2139 do \
2140 { \
2141 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2142 if (rcStrict2 == VINF_SUCCESS) \
2143 { /* likely */ } \
2144 else \
2145 return rcStrict2; \
2146 } while (0)
2147#else
2148# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2149#endif /* IEM_WITH_SETJMP */
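
/* Usage sketch (illustrative only, not code from this file): a decoder built on
 * these macros pulls opcode bytes roughly like this, with the macro either
 * returning the strict status code to the caller or longjmp'ing, depending on
 * whether IEM_WITH_SETJMP is defined:
 *
 *      uint8_t bOpcode;
 *      IEM_OPCODE_GET_NEXT_U8(&bOpcode);   // implicitly uses the local pVCpu
 *      switch (bOpcode) { ... }
 */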
2150
2151
2152#ifndef IEM_WITH_SETJMP
2153/**
2154 * Fetches the next signed byte from the opcode stream.
2155 *
2156 * @returns Strict VBox status code.
2157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2158 * @param pi8 Where to return the signed byte.
2159 */
2160DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8)
2161{
2162 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2163}
2164#endif /* !IEM_WITH_SETJMP */
2165
2166
2167/**
2168 * Fetches the next signed byte from the opcode stream, returning automatically
2169 * on failure.
2170 *
2171 * @param a_pi8 Where to return the signed byte.
2172 * @remark Implicitly references pVCpu.
2173 */
2174#ifndef IEM_WITH_SETJMP
2175# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2176 do \
2177 { \
2178 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2179 if (rcStrict2 != VINF_SUCCESS) \
2180 return rcStrict2; \
2181 } while (0)
2182#else /* IEM_WITH_SETJMP */
2183# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2184
2185#endif /* IEM_WITH_SETJMP */
2186
2187#ifndef IEM_WITH_SETJMP
2188
2189/**
2190 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2191 *
2192 * @returns Strict VBox status code.
2193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2194 * @param pu16 Where to return the opcode word.
2195 */
2196DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2197{
2198 uint8_t u8;
2199 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
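    /* The (int8_t) cast below sign-extends the byte; the implicit conversion back
       to the unsigned 16-bit destination keeps the extended bits, which is the
       whole point of the S8SxU16 variant. */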
2200 if (rcStrict == VINF_SUCCESS)
2201 *pu16 = (int8_t)u8;
2202 return rcStrict;
2203}
2204
2205
2206/**
2207 * Fetches the next signed byte from the opcode stream, extending it to
2208 * unsigned 16-bit.
2209 *
2210 * @returns Strict VBox status code.
2211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2212 * @param pu16 Where to return the unsigned word.
2213 */
2214DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16)
2215{
2216 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2217 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2218 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2219
2220 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2221 pVCpu->iem.s.offOpcode = offOpcode + 1;
2222 return VINF_SUCCESS;
2223}
2224
2225#endif /* !IEM_WITH_SETJMP */
2226
2227/**
2228 * Fetches the next signed byte from the opcode stream, sign-extending it to
2229 * a word, returning automatically on failure.
2230 *
2231 * @param a_pu16 Where to return the word.
2232 * @remark Implicitly references pVCpu.
2233 */
2234#ifndef IEM_WITH_SETJMP
2235# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2236 do \
2237 { \
2238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2239 if (rcStrict2 != VINF_SUCCESS) \
2240 return rcStrict2; \
2241 } while (0)
2242#else
2243# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2244#endif
2245
2246#ifndef IEM_WITH_SETJMP
2247
2248/**
2249 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2250 *
2251 * @returns Strict VBox status code.
2252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2253 * @param pu32 Where to return the opcode dword.
2254 */
2255DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2256{
2257 uint8_t u8;
2258 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2259 if (rcStrict == VINF_SUCCESS)
2260 *pu32 = (int8_t)u8;
2261 return rcStrict;
2262}
2263
2264
2265/**
2266 * Fetches the next signed byte from the opcode stream, extending it to
2267 * unsigned 32-bit.
2268 *
2269 * @returns Strict VBox status code.
2270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2271 * @param pu32 Where to return the unsigned dword.
2272 */
2273DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2274{
2275 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2276 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2277 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2278
2279 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2280 pVCpu->iem.s.offOpcode = offOpcode + 1;
2281 return VINF_SUCCESS;
2282}
2283
2284#endif /* !IEM_WITH_SETJMP */
2285
2286/**
2287 * Fetches the next signed byte from the opcode stream, sign-extending it to
2288 * a double word, returning automatically on failure.
2289 *
2290 * @param a_pu32 Where to return the double word.
2291 * @remark Implicitly references pVCpu.
2292 */
2293#ifndef IEM_WITH_SETJMP
2294# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2295 do \
2296 { \
2297 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2298 if (rcStrict2 != VINF_SUCCESS) \
2299 return rcStrict2; \
2300 } while (0)
2301#else
2302# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2303#endif
2304
2305#ifndef IEM_WITH_SETJMP
2306
2307/**
2308 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2309 *
2310 * @returns Strict VBox status code.
2311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2312 * @param pu64 Where to return the opcode qword.
2313 */
2314DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2315{
2316 uint8_t u8;
2317 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2318 if (rcStrict == VINF_SUCCESS)
2319 *pu64 = (int8_t)u8;
2320 return rcStrict;
2321}
2322
2323
2324/**
2325 * Fetches the next signed byte from the opcode stream, extending it to
2326 * unsigned 64-bit.
2327 *
2328 * @returns Strict VBox status code.
2329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2330 * @param pu64 Where to return the unsigned qword.
2331 */
2332DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2333{
2334 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2335 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2336 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2337
2338 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2339 pVCpu->iem.s.offOpcode = offOpcode + 1;
2340 return VINF_SUCCESS;
2341}
2342
2343#endif /* !IEM_WITH_SETJMP */
2344
2345
2346/**
2347 * Fetches the next signed byte from the opcode stream, sign-extending it to
2348 * a quad word, returning automatically on failure.
2349 *
2350 * @param a_pu64 Where to return the quad word.
2351 * @remark Implicitly references pVCpu.
2352 */
2353#ifndef IEM_WITH_SETJMP
2354# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2355 do \
2356 { \
2357 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2358 if (rcStrict2 != VINF_SUCCESS) \
2359 return rcStrict2; \
2360 } while (0)
2361#else
2362# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2363#endif
2364
2365
2366#ifndef IEM_WITH_SETJMP
2367/**
2368 * Fetches the next opcode byte, which is a ModR/M byte, recording its offset.
2369 *
2370 * @returns Strict VBox status code.
2371 * @param pVCpu The cross context virtual CPU structure of the
2372 * calling thread.
2373 * @param pu8 Where to return the opcode byte.
2374 */
2375DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8)
2376{
2377 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2378 pVCpu->iem.s.offModRm = offOpcode;
2379 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2380 {
2381 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2382 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2383 return VINF_SUCCESS;
2384 }
2385 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2386}
2387#else /* IEM_WITH_SETJMP */
2388/**
2389 * Fetches the next opcode byte, which is a ModR/M byte, recording its offset; longjmp on error.
2390 *
2391 * @returns The opcode byte.
2392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2393 */
2394DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu)
2395{
2396# ifdef IEM_WITH_CODE_TLB
2397 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2398 pVCpu->iem.s.offModRm = offBuf;
2399 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2400 if (RT_LIKELY( pbBuf != NULL
2401 && offBuf < pVCpu->iem.s.cbInstrBuf))
2402 {
2403 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2404 return pbBuf[offBuf];
2405 }
2406# else
2407 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2408 pVCpu->iem.s.offModRm = offOpcode;
2409 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2410 {
2411 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2412 return pVCpu->iem.s.abOpcode[offOpcode];
2413 }
2414# endif
2415 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2416}
2417#endif /* IEM_WITH_SETJMP */
2418
2419/**
2420 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2421 * on failure.
2422 *
2423 * Will note down the position of the ModR/M byte for VT-x exits.
2424 *
2425 * @param a_pbRm Where to return the RM opcode byte.
2426 * @remark Implicitly references pVCpu.
2427 */
2428#ifndef IEM_WITH_SETJMP
2429# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2430 do \
2431 { \
2432 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2433 if (rcStrict2 == VINF_SUCCESS) \
2434 { /* likely */ } \
2435 else \
2436 return rcStrict2; \
2437 } while (0)
2438#else
2439# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2440#endif /* IEM_WITH_SETJMP */
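
/* Usage sketch (illustrative only, not code from this file): instructions with a
 * ModR/M byte fetch it through this macro so that pVCpu->iem.s.offModRm records
 * where the byte sits in the instruction stream for later VT-x exit handling:
 *
 *      uint8_t bRm;
 *      IEM_OPCODE_GET_NEXT_RM(&bRm);
 *      // mod = bRm >> 6, reg = (bRm >> 3) & 7, r/m = bRm & 7
 */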
2441
2442
2443#ifndef IEM_WITH_SETJMP
2444
2445/**
2446 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2447 *
2448 * @returns Strict VBox status code.
2449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2450 * @param pu16 Where to return the opcode word.
2451 */
2452DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2453{
2454 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2455 if (rcStrict == VINF_SUCCESS)
2456 {
2457 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
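        /* With IEM_USE_UNALIGNED_DATA_ACCESS (presumably only defined for hosts
           that tolerate unaligned loads) the word is read straight out of the
           opcode buffer; the RT_MAKE_U16 fallback assembles it from the two
           bytes, low byte first, matching the little-endian instruction stream. */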
2458# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2459 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2460# else
2461 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2462# endif
2463 pVCpu->iem.s.offOpcode = offOpcode + 2;
2464 }
2465 else
2466 *pu16 = 0;
2467 return rcStrict;
2468}
2469
2470
2471/**
2472 * Fetches the next opcode word.
2473 *
2474 * @returns Strict VBox status code.
2475 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2476 * @param pu16 Where to return the opcode word.
2477 */
2478DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16)
2479{
2480 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2481 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2482 {
2483 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2484# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2485 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2486# else
2487 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2488# endif
2489 return VINF_SUCCESS;
2490 }
2491 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2492}
2493
2494#else /* IEM_WITH_SETJMP */
2495
2496/**
2497 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error
2498 *
2499 * @returns The opcode word.
2500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2501 */
2502DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu)
2503{
2504# ifdef IEM_WITH_CODE_TLB
2505 uint16_t u16;
2506 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2507 return u16;
2508# else
2509 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2510 if (rcStrict == VINF_SUCCESS)
2511 {
2512 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2513 pVCpu->iem.s.offOpcode += 2;
2514# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2515 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2516# else
2517 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2518# endif
2519 }
2520 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2521# endif
2522}
2523
2524
2525/**
2526 * Fetches the next opcode word, longjmp on error.
2527 *
2528 * @returns The opcode word.
2529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2530 */
2531DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu)
2532{
2533# ifdef IEM_WITH_CODE_TLB
2534 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2535 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2536 if (RT_LIKELY( pbBuf != NULL
2537 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2538 {
2539 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2540# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2541 return *(uint16_t const *)&pbBuf[offBuf];
2542# else
2543 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2544# endif
2545 }
2546# else
2547 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2548 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2549 {
2550 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2551# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2552 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2553# else
2554 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2555# endif
2556 }
2557# endif
2558 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2559}
2560
2561#endif /* IEM_WITH_SETJMP */
2562
2563
2564/**
2565 * Fetches the next opcode word, returns automatically on failure.
2566 *
2567 * @param a_pu16 Where to return the opcode word.
2568 * @remark Implicitly references pVCpu.
2569 */
2570#ifndef IEM_WITH_SETJMP
2571# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2572 do \
2573 { \
2574 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2575 if (rcStrict2 != VINF_SUCCESS) \
2576 return rcStrict2; \
2577 } while (0)
2578#else
2579# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2580#endif
2581
2582#ifndef IEM_WITH_SETJMP
2583
2584/**
2585 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2586 *
2587 * @returns Strict VBox status code.
2588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2589 * @param pu32 Where to return the opcode double word.
2590 */
2591DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2592{
2593 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2594 if (rcStrict == VINF_SUCCESS)
2595 {
2596 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2597 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2598 pVCpu->iem.s.offOpcode = offOpcode + 2;
2599 }
2600 else
2601 *pu32 = 0;
2602 return rcStrict;
2603}
2604
2605
2606/**
2607 * Fetches the next opcode word, zero extending it to a double word.
2608 *
2609 * @returns Strict VBox status code.
2610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2611 * @param pu32 Where to return the opcode double word.
2612 */
2613DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2614{
2615 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2616 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2617 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2618
2619 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2620 pVCpu->iem.s.offOpcode = offOpcode + 2;
2621 return VINF_SUCCESS;
2622}
2623
2624#endif /* !IEM_WITH_SETJMP */
2625
2626
2627/**
2628 * Fetches the next opcode word and zero extends it to a double word, returns
2629 * automatically on failure.
2630 *
2631 * @param a_pu32 Where to return the opcode double word.
2632 * @remark Implicitly references pVCpu.
2633 */
2634#ifndef IEM_WITH_SETJMP
2635# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2636 do \
2637 { \
2638 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2639 if (rcStrict2 != VINF_SUCCESS) \
2640 return rcStrict2; \
2641 } while (0)
2642#else
2643# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2644#endif
2645
2646#ifndef IEM_WITH_SETJMP
2647
2648/**
2649 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2650 *
2651 * @returns Strict VBox status code.
2652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2653 * @param pu64 Where to return the opcode quad word.
2654 */
2655DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2656{
2657 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2658 if (rcStrict == VINF_SUCCESS)
2659 {
2660 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2661 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2662 pVCpu->iem.s.offOpcode = offOpcode + 2;
2663 }
2664 else
2665 *pu64 = 0;
2666 return rcStrict;
2667}
2668
2669
2670/**
2671 * Fetches the next opcode word, zero extending it to a quad word.
2672 *
2673 * @returns Strict VBox status code.
2674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2675 * @param pu64 Where to return the opcode quad word.
2676 */
2677DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2678{
2679 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2680 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2681 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2682
2683 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2684 pVCpu->iem.s.offOpcode = offOpcode + 2;
2685 return VINF_SUCCESS;
2686}
2687
2688#endif /* !IEM_WITH_SETJMP */
2689
2690/**
2691 * Fetches the next opcode word and zero extends it to a quad word, returns
2692 * automatically on failure.
2693 *
2694 * @param a_pu64 Where to return the opcode quad word.
2695 * @remark Implicitly references pVCpu.
2696 */
2697#ifndef IEM_WITH_SETJMP
2698# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2699 do \
2700 { \
2701 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2702 if (rcStrict2 != VINF_SUCCESS) \
2703 return rcStrict2; \
2704 } while (0)
2705#else
2706# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2707#endif
2708
2709
2710#ifndef IEM_WITH_SETJMP
2711/**
2712 * Fetches the next signed word from the opcode stream.
2713 *
2714 * @returns Strict VBox status code.
2715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2716 * @param pi16 Where to return the signed word.
2717 */
2718DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16)
2719{
2720 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2721}
2722#endif /* !IEM_WITH_SETJMP */
2723
2724
2725/**
2726 * Fetches the next signed word from the opcode stream, returning automatically
2727 * on failure.
2728 *
2729 * @param a_pi16 Where to return the signed word.
2730 * @remark Implicitly references pVCpu.
2731 */
2732#ifndef IEM_WITH_SETJMP
2733# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2734 do \
2735 { \
2736 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2737 if (rcStrict2 != VINF_SUCCESS) \
2738 return rcStrict2; \
2739 } while (0)
2740#else
2741# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2742#endif
2743
2744#ifndef IEM_WITH_SETJMP
2745
2746/**
2747 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2748 *
2749 * @returns Strict VBox status code.
2750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2751 * @param pu32 Where to return the opcode dword.
2752 */
2753DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2754{
2755 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2756 if (rcStrict == VINF_SUCCESS)
2757 {
2758 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2759# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2760 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2761# else
2762 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2763 pVCpu->iem.s.abOpcode[offOpcode + 1],
2764 pVCpu->iem.s.abOpcode[offOpcode + 2],
2765 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2766# endif
2767 pVCpu->iem.s.offOpcode = offOpcode + 4;
2768 }
2769 else
2770 *pu32 = 0;
2771 return rcStrict;
2772}
2773
2774
2775/**
2776 * Fetches the next opcode dword.
2777 *
2778 * @returns Strict VBox status code.
2779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2780 * @param pu32 Where to return the opcode double word.
2781 */
2782DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32)
2783{
2784 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2785 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2786 {
2787 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2788# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2789 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2790# else
2791 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2792 pVCpu->iem.s.abOpcode[offOpcode + 1],
2793 pVCpu->iem.s.abOpcode[offOpcode + 2],
2794 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2795# endif
2796 return VINF_SUCCESS;
2797 }
2798 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2799}
2800
2801#else /* IEM_WITH_SETJMP */
2802
2803/**
2804 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2805 *
2806 * @returns The opcode dword.
2807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2808 */
2809DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu)
2810{
2811# ifdef IEM_WITH_CODE_TLB
2812 uint32_t u32;
2813 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2814 return u32;
2815# else
2816 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2817 if (rcStrict == VINF_SUCCESS)
2818 {
2819 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2820 pVCpu->iem.s.offOpcode = offOpcode + 4;
2821# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2822 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2823# else
2824 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2825 pVCpu->iem.s.abOpcode[offOpcode + 1],
2826 pVCpu->iem.s.abOpcode[offOpcode + 2],
2827 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2828# endif
2829 }
2830 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2831# endif
2832}
2833
2834
2835/**
2836 * Fetches the next opcode dword, longjmp on error.
2837 *
2838 * @returns The opcode dword.
2839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2840 */
2841DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu)
2842{
2843# ifdef IEM_WITH_CODE_TLB
2844 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2845 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2846 if (RT_LIKELY( pbBuf != NULL
2847 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2848 {
2849 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2850# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2851 return *(uint32_t const *)&pbBuf[offBuf];
2852# else
2853 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2854 pbBuf[offBuf + 1],
2855 pbBuf[offBuf + 2],
2856 pbBuf[offBuf + 3]);
2857# endif
2858 }
2859# else
2860 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2861 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2862 {
2863 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2864# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2865 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2866# else
2867 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2868 pVCpu->iem.s.abOpcode[offOpcode + 1],
2869 pVCpu->iem.s.abOpcode[offOpcode + 2],
2870 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2871# endif
2872 }
2873# endif
2874 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2875}
2876
2877#endif /* IEM_WITH_SETJMP */
2878
2879
2880/**
2881 * Fetches the next opcode dword, returns automatically on failure.
2882 *
2883 * @param a_pu32 Where to return the opcode dword.
2884 * @remark Implicitly references pVCpu.
2885 */
2886#ifndef IEM_WITH_SETJMP
2887# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2888 do \
2889 { \
2890 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2891 if (rcStrict2 != VINF_SUCCESS) \
2892 return rcStrict2; \
2893 } while (0)
2894#else
2895# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2896#endif
2897
2898#ifndef IEM_WITH_SETJMP
2899
2900/**
2901 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2902 *
2903 * @returns Strict VBox status code.
2904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2905 * @param pu64 Where to return the opcode qword.
2906 */
2907DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2908{
2909 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2910 if (rcStrict == VINF_SUCCESS)
2911 {
2912 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2913 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2914 pVCpu->iem.s.abOpcode[offOpcode + 1],
2915 pVCpu->iem.s.abOpcode[offOpcode + 2],
2916 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2917 pVCpu->iem.s.offOpcode = offOpcode + 4;
2918 }
2919 else
2920 *pu64 = 0;
2921 return rcStrict;
2922}
2923
2924
2925/**
2926 * Fetches the next opcode dword, zero extending it to a quad word.
2927 *
2928 * @returns Strict VBox status code.
2929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2930 * @param pu64 Where to return the opcode quad word.
2931 */
2932DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2933{
2934 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2935 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2936 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2937
2938 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2939 pVCpu->iem.s.abOpcode[offOpcode + 1],
2940 pVCpu->iem.s.abOpcode[offOpcode + 2],
2941 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2942 pVCpu->iem.s.offOpcode = offOpcode + 4;
2943 return VINF_SUCCESS;
2944}
2945
2946#endif /* !IEM_WITH_SETJMP */
2947
2948
2949/**
2950 * Fetches the next opcode dword and zero extends it to a quad word, returns
2951 * automatically on failure.
2952 *
2953 * @param a_pu64 Where to return the opcode quad word.
2954 * @remark Implicitly references pVCpu.
2955 */
2956#ifndef IEM_WITH_SETJMP
2957# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2958 do \
2959 { \
2960 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2961 if (rcStrict2 != VINF_SUCCESS) \
2962 return rcStrict2; \
2963 } while (0)
2964#else
2965# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2966#endif
2967
2968
2969#ifndef IEM_WITH_SETJMP
2970/**
2971 * Fetches the next signed double word from the opcode stream.
2972 *
2973 * @returns Strict VBox status code.
2974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2975 * @param pi32 Where to return the signed double word.
2976 */
2977DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32)
2978{
2979 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2980}
2981#endif
2982
2983/**
2984 * Fetches the next signed double word from the opcode stream, returning
2985 * automatically on failure.
2986 *
2987 * @param a_pi32 Where to return the signed double word.
2988 * @remark Implicitly references pVCpu.
2989 */
2990#ifndef IEM_WITH_SETJMP
2991# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2992 do \
2993 { \
2994 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2995 if (rcStrict2 != VINF_SUCCESS) \
2996 return rcStrict2; \
2997 } while (0)
2998#else
2999# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3000#endif
3001
3002#ifndef IEM_WITH_SETJMP
3003
3004/**
3005 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3006 *
3007 * @returns Strict VBox status code.
3008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3009 * @param pu64 Where to return the opcode qword.
3010 */
3011DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3012{
3013 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3014 if (rcStrict == VINF_SUCCESS)
3015 {
3016 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3017 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3018 pVCpu->iem.s.abOpcode[offOpcode + 1],
3019 pVCpu->iem.s.abOpcode[offOpcode + 2],
3020 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3021 pVCpu->iem.s.offOpcode = offOpcode + 4;
3022 }
3023 else
3024 *pu64 = 0;
3025 return rcStrict;
3026}
3027
3028
3029/**
3030 * Fetches the next opcode dword, sign extending it into a quad word.
3031 *
3032 * @returns Strict VBox status code.
3033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3034 * @param pu64 Where to return the opcode quad word.
3035 */
3036DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3037{
3038 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3039 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3040 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3041
3042 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3043 pVCpu->iem.s.abOpcode[offOpcode + 1],
3044 pVCpu->iem.s.abOpcode[offOpcode + 2],
3045 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3046 *pu64 = i32;
3047 pVCpu->iem.s.offOpcode = offOpcode + 4;
3048 return VINF_SUCCESS;
3049}
3050
3051#endif /* !IEM_WITH_SETJMP */
3052
3053
3054/**
3055 * Fetches the next opcode double word and sign extends it to a quad word,
3056 * returns automatically on failure.
3057 *
3058 * @param a_pu64 Where to return the opcode quad word.
3059 * @remark Implicitly references pVCpu.
3060 */
3061#ifndef IEM_WITH_SETJMP
3062# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3063 do \
3064 { \
3065 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3066 if (rcStrict2 != VINF_SUCCESS) \
3067 return rcStrict2; \
3068 } while (0)
3069#else
3070# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3071#endif
3072
3073#ifndef IEM_WITH_SETJMP
3074
3075/**
3076 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3077 *
3078 * @returns Strict VBox status code.
3079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3080 * @param pu64 Where to return the opcode qword.
3081 */
3082DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3083{
3084 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3085 if (rcStrict == VINF_SUCCESS)
3086 {
3087 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3088# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3089 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3090# else
3091 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3092 pVCpu->iem.s.abOpcode[offOpcode + 1],
3093 pVCpu->iem.s.abOpcode[offOpcode + 2],
3094 pVCpu->iem.s.abOpcode[offOpcode + 3],
3095 pVCpu->iem.s.abOpcode[offOpcode + 4],
3096 pVCpu->iem.s.abOpcode[offOpcode + 5],
3097 pVCpu->iem.s.abOpcode[offOpcode + 6],
3098 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3099# endif
3100 pVCpu->iem.s.offOpcode = offOpcode + 8;
3101 }
3102 else
3103 *pu64 = 0;
3104 return rcStrict;
3105}
3106
3107
3108/**
3109 * Fetches the next opcode qword.
3110 *
3111 * @returns Strict VBox status code.
3112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3113 * @param pu64 Where to return the opcode qword.
3114 */
3115DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64)
3116{
3117 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3118 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3119 {
3120# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3121 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3122# else
3123 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3124 pVCpu->iem.s.abOpcode[offOpcode + 1],
3125 pVCpu->iem.s.abOpcode[offOpcode + 2],
3126 pVCpu->iem.s.abOpcode[offOpcode + 3],
3127 pVCpu->iem.s.abOpcode[offOpcode + 4],
3128 pVCpu->iem.s.abOpcode[offOpcode + 5],
3129 pVCpu->iem.s.abOpcode[offOpcode + 6],
3130 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3131# endif
3132 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3133 return VINF_SUCCESS;
3134 }
3135 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3136}
3137
3138#else /* IEM_WITH_SETJMP */
3139
3140/**
3141 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3142 *
3143 * @returns The opcode qword.
3144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3145 */
3146DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu)
3147{
3148# ifdef IEM_WITH_CODE_TLB
3149 uint64_t u64;
3150 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3151 return u64;
3152# else
3153 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3154 if (rcStrict == VINF_SUCCESS)
3155 {
3156 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3157 pVCpu->iem.s.offOpcode = offOpcode + 8;
3158# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3159 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3160# else
3161 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3162 pVCpu->iem.s.abOpcode[offOpcode + 1],
3163 pVCpu->iem.s.abOpcode[offOpcode + 2],
3164 pVCpu->iem.s.abOpcode[offOpcode + 3],
3165 pVCpu->iem.s.abOpcode[offOpcode + 4],
3166 pVCpu->iem.s.abOpcode[offOpcode + 5],
3167 pVCpu->iem.s.abOpcode[offOpcode + 6],
3168 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3169# endif
3170 }
3171 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3172# endif
3173}
3174
3175
3176/**
3177 * Fetches the next opcode qword, longjmp on error.
3178 *
3179 * @returns The opcode qword.
3180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3181 */
3182DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu)
3183{
3184# ifdef IEM_WITH_CODE_TLB
3185 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3186 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3187 if (RT_LIKELY( pbBuf != NULL
3188 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3189 {
3190 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3191# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3192 return *(uint64_t const *)&pbBuf[offBuf];
3193# else
3194 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3195 pbBuf[offBuf + 1],
3196 pbBuf[offBuf + 2],
3197 pbBuf[offBuf + 3],
3198 pbBuf[offBuf + 4],
3199 pbBuf[offBuf + 5],
3200 pbBuf[offBuf + 6],
3201 pbBuf[offBuf + 7]);
3202# endif
3203 }
3204# else
3205 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3206 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3207 {
3208 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3209# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3210 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3211# else
3212 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3213 pVCpu->iem.s.abOpcode[offOpcode + 1],
3214 pVCpu->iem.s.abOpcode[offOpcode + 2],
3215 pVCpu->iem.s.abOpcode[offOpcode + 3],
3216 pVCpu->iem.s.abOpcode[offOpcode + 4],
3217 pVCpu->iem.s.abOpcode[offOpcode + 5],
3218 pVCpu->iem.s.abOpcode[offOpcode + 6],
3219 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3220# endif
3221 }
3222# endif
3223 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3224}
3225
3226#endif /* IEM_WITH_SETJMP */
3227
3228/**
3229 * Fetches the next opcode quad word, returning automatically on failure.
3230 *
3231 * @param a_pu64 Where to return the opcode quad word.
3232 * @remark Implicitly references pVCpu.
3233 */
3234#ifndef IEM_WITH_SETJMP
3235# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3236 do \
3237 { \
3238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3239 if (rcStrict2 != VINF_SUCCESS) \
3240 return rcStrict2; \
3241 } while (0)
3242#else
3243# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3244#endif
3245
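/*
 * Illustrative sketch (hypothetical, not part of the decoder): how the
 * IEM_OPCODE_GET_NEXT_* macros above are typically consumed.  In non-setjmp
 * builds each macro returns the strict status code from the enclosing function
 * on a fetch failure; in setjmp builds it longjmps instead, so the caller can
 * read immediates linearly without explicit error handling.
 *
 * @code
 *  IEM_STATIC VBOXSTRICTRC iemOpExampleFetchImm64(PVMCPUCC pVCpu)  // hypothetical helper
 *  {
 *      uint64_t u64Imm;
 *      IEM_OPCODE_GET_NEXT_U64(&u64Imm);   // returns/longjmps on fetch failure
 *      pVCpu->cpum.GstCtx.rax = u64Imm;    // e.g. commit the immediate to RAX
 *      return VINF_SUCCESS;
 *  }
 * @endcode
 */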
3246
3247/** @name Misc Worker Functions.
3248 * @{
3249 */
3250
3251/**
3252 * Gets the exception class for the specified exception vector.
3253 *
3254 * @returns The class of the specified exception.
3255 * @param uVector The exception vector.
3256 */
3257IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3258{
3259 Assert(uVector <= X86_XCPT_LAST);
3260 switch (uVector)
3261 {
3262 case X86_XCPT_DE:
3263 case X86_XCPT_TS:
3264 case X86_XCPT_NP:
3265 case X86_XCPT_SS:
3266 case X86_XCPT_GP:
3267 case X86_XCPT_SX: /* AMD only */
3268 return IEMXCPTCLASS_CONTRIBUTORY;
3269
3270 case X86_XCPT_PF:
3271 case X86_XCPT_VE: /* Intel only */
3272 return IEMXCPTCLASS_PAGE_FAULT;
3273
3274 case X86_XCPT_DF:
3275 return IEMXCPTCLASS_DOUBLE_FAULT;
3276 }
3277 return IEMXCPTCLASS_BENIGN;
3278}
3279
3280
3281/**
3282 * Evaluates how to handle an exception caused during delivery of another event
3283 * (exception / interrupt).
3284 *
3285 * @returns How to handle the recursive exception.
3286 * @param pVCpu The cross context virtual CPU structure of the
3287 * calling thread.
3288 * @param fPrevFlags The flags of the previous event.
3289 * @param uPrevVector The vector of the previous event.
3290 * @param fCurFlags The flags of the current exception.
3291 * @param uCurVector The vector of the current exception.
3292 * @param pfXcptRaiseInfo Where to store additional information about the
3293 * exception condition. Optional.
3294 */
3295VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3296 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3297{
3298 /*
3299 * Only CPU exceptions can be raised while delivering other events; exceptions generated by
3300 * software interrupts (INTn/INT3/INTO/ICEBP) cannot occur as the current (second) exception.
3301 */
3302 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3303 Assert(pVCpu); RT_NOREF(pVCpu);
3304 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3305
3306 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3307 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3308 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3309 {
3310 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3311 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3312 {
3313 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3314 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3315 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3316 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3317 {
3318 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3319 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3320 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3321 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3322 uCurVector, pVCpu->cpum.GstCtx.cr2));
3323 }
3324 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3325 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3326 {
3327 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3328 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3329 }
3330 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3331 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3332 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3333 {
3334 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3335 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3336 }
3337 }
3338 else
3339 {
3340 if (uPrevVector == X86_XCPT_NMI)
3341 {
3342 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3343 if (uCurVector == X86_XCPT_PF)
3344 {
3345 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3346 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3347 }
3348 }
3349 else if ( uPrevVector == X86_XCPT_AC
3350 && uCurVector == X86_XCPT_AC)
3351 {
3352 enmRaise = IEMXCPTRAISE_CPU_HANG;
3353 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3354 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3355 }
3356 }
3357 }
3358 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3359 {
3360 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3361 if (uCurVector == X86_XCPT_PF)
3362 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3363 }
3364 else
3365 {
3366 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3367 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3368 }
3369
3370 if (pfXcptRaiseInfo)
3371 *pfXcptRaiseInfo = fRaiseInfo;
3372 return enmRaise;
3373}
3374
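/*
 * Illustrative sketch (hypothetical caller, not the actual delivery code): how
 * IEMEvaluateRecursiveXcpt is consulted when a second exception is raised while
 * a previous one is being delivered.  The classic case is a contributory
 * exception (#GP) raised during delivery of another contributory one (#NP),
 * which the logic above resolves to a double fault.
 *
 * @code
 *  IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
 *  IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                   IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_NP,
 *                                                   IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                   &fRaiseInfo);
 *  if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
 *  {
 *      // Deliver #DF with a zero error code instead of the second exception.
 *  }
 * @endcode
 */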
3375
3376/**
3377 * Enters the CPU shutdown state initiated by a triple fault or other
3378 * unrecoverable conditions.
3379 *
3380 * @returns Strict VBox status code.
3381 * @param pVCpu The cross context virtual CPU structure of the
3382 * calling thread.
3383 */
3384IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu)
3385{
3386 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3387 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3388
3389 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3390 {
3391 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3392 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3393 }
3394
3395 RT_NOREF(pVCpu);
3396 return VINF_EM_TRIPLE_FAULT;
3397}
3398
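/*
 * Added note (illustrative): the ordering above gives VMX non-root mode the
 * triple-fault VM-exit first, then the SVM SHUTDOWN intercept, and only when
 * neither applies is the shutdown surfaced to the VM via VINF_EM_TRIPLE_FAULT.
 */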
3399
3400/**
3401 * Validates a new SS segment.
3402 *
3403 * @returns VBox strict status code.
3404 * @param pVCpu The cross context virtual CPU structure of the
3405 * calling thread.
3406 * @param NewSS The new SS selector.
3407 * @param uCpl The CPL to load the stack for.
3408 * @param pDesc Where to return the descriptor.
3409 */
3410IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3411{
3412 /* Null selectors are not allowed (we're not called for dispatching
3413 interrupts with SS=0 in long mode). */
3414 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3415 {
3416 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3417 return iemRaiseTaskSwitchFault0(pVCpu);
3418 }
3419
3420 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3421 if ((NewSS & X86_SEL_RPL) != uCpl)
3422 {
3423 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3424 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3425 }
3426
3427 /*
3428 * Read the descriptor.
3429 */
3430 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3431 if (rcStrict != VINF_SUCCESS)
3432 return rcStrict;
3433
3434 /*
3435 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3436 */
3437 if (!pDesc->Legacy.Gen.u1DescType)
3438 {
3439 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3440 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3441 }
3442
3443 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3444 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3445 {
3446 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3447 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3448 }
3449 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3450 {
3451 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3452 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3453 }
3454
3455 /* Is it there? */
3456 /** @todo testcase: Is this checked before the canonical / limit check below? */
3457 if (!pDesc->Legacy.Gen.u1Present)
3458 {
3459 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3460 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3461 }
3462
3463 return VINF_SUCCESS;
3464}
3465
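/*
 * Added summary (for clarity): the checks above follow the documented order for
 * LSS/POP SS/MOV SS style loads: a null selector raises #TS(0); an RPL != CPL,
 * a system descriptor, a code or read-only segment, or a DPL != CPL raises #TS
 * with the selector; only then is the present bit checked (#NP).
 */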
3466
3467/**
3468 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3469 * not (kind of obsolete now).
3470 *
3471 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3472 */
3473#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3474
3475/**
3476 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
3477 *
3478 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3479 * @param a_fEfl The new EFLAGS.
3480 */
3481#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3482
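/*
 * Illustrative usage (sketch): the two macros form a read-modify-write pair,
 * e.g. the real-mode interrupt dispatch below clears IF/TF/AC this way:
 *
 * @code
 *  uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 *  fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
 *  IEMMISC_SET_EFL(pVCpu, fEfl);
 * @endcode
 */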
3483/** @} */
3484
3485
3486/** @name Raising Exceptions.
3487 *
3488 * @{
3489 */
3490
3491
3492/**
3493 * Loads the specified stack far pointer from the TSS.
3494 *
3495 * @returns VBox strict status code.
3496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3497 * @param uCpl The CPL to load the stack for.
3498 * @param pSelSS Where to return the new stack segment.
3499 * @param puEsp Where to return the new stack pointer.
3500 */
3501IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3502{
3503 VBOXSTRICTRC rcStrict;
3504 Assert(uCpl < 4);
3505
3506 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3507 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3508 {
3509 /*
3510 * 16-bit TSS (X86TSS16).
3511 */
3512 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3513 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3514 {
3515 uint32_t off = uCpl * 4 + 2;
3516 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3517 {
3518 /** @todo check actual access pattern here. */
3519 uint32_t u32Tmp = 0; /* gcc maybe... */
3520 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3521 if (rcStrict == VINF_SUCCESS)
3522 {
3523 *puEsp = RT_LOWORD(u32Tmp);
3524 *pSelSS = RT_HIWORD(u32Tmp);
3525 return VINF_SUCCESS;
3526 }
3527 }
3528 else
3529 {
3530 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3531 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3532 }
3533 break;
3534 }
3535
3536 /*
3537 * 32-bit TSS (X86TSS32).
3538 */
3539 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3540 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3541 {
3542 uint32_t off = uCpl * 8 + 4;
3543 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3544 {
3545/** @todo check actual access pattern here. */
3546 uint64_t u64Tmp;
3547 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3548 if (rcStrict == VINF_SUCCESS)
3549 {
3550 *puEsp = u64Tmp & UINT32_MAX;
3551 *pSelSS = (RTSEL)(u64Tmp >> 32);
3552 return VINF_SUCCESS;
3553 }
3554 }
3555 else
3556 {
3557 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3558 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3559 }
3560 break;
3561 }
3562
3563 default:
3564 AssertFailed();
3565 rcStrict = VERR_IEM_IPE_4;
3566 break;
3567 }
3568
3569 *puEsp = 0; /* make gcc happy */
3570 *pSelSS = 0; /* make gcc happy */
3571 return rcStrict;
3572}
3573
3574
3575/**
3576 * Loads the specified stack pointer from the 64-bit TSS.
3577 *
3578 * @returns VBox strict status code.
3579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3580 * @param uCpl The CPL to load the stack for.
3581 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3582 * @param puRsp Where to return the new stack pointer.
3583 */
3584IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3585{
3586 Assert(uCpl < 4);
3587 Assert(uIst < 8);
3588 *puRsp = 0; /* make gcc happy */
3589
3590 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3591 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3592
3593 uint32_t off;
3594 if (uIst)
3595 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3596 else
3597 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3598 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3599 {
3600 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3601 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3602 }
3603
3604 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3605}
3606
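/*
 * Worked example (illustrative, assuming the standard 64-bit TSS layout with
 * rsp0 at offset 0x04 and ist1 at offset 0x24): uIst=0 with uCpl=1 reads RSP1
 * at offset 1*8 + 0x04 = 0x0c, while uIst=3 reads IST3 at 2*8 + 0x24 = 0x34.
 */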
3607
3608/**
3609 * Adjust the CPU state according to the exception being raised.
3610 *
3611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3612 * @param u8Vector The exception that has been raised.
3613 */
3614DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
3615{
3616 switch (u8Vector)
3617 {
3618 case X86_XCPT_DB:
3619 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3620 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3621 break;
3622 /** @todo Read the AMD and Intel exception reference... */
3623 }
3624}
3625
3626
3627/**
3628 * Implements exceptions and interrupts for real mode.
3629 *
3630 * @returns VBox strict status code.
3631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3632 * @param cbInstr The number of bytes to offset rIP by in the return
3633 * address.
3634 * @param u8Vector The interrupt / exception vector number.
3635 * @param fFlags The flags.
3636 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3637 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3638 */
3639IEM_STATIC VBOXSTRICTRC
3640iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
3641 uint8_t cbInstr,
3642 uint8_t u8Vector,
3643 uint32_t fFlags,
3644 uint16_t uErr,
3645 uint64_t uCr2)
3646{
3647 NOREF(uErr); NOREF(uCr2);
3648 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3649
3650 /*
3651 * Read the IDT entry.
3652 */
3653 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3654 {
3655 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3656 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3657 }
3658 RTFAR16 Idte;
3659 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3660 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3661 {
3662 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3663 return rcStrict;
3664 }
3665
3666 /*
3667 * Push the stack frame.
3668 */
3669 uint16_t *pu16Frame;
3670 uint64_t uNewRsp;
3671 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3672 if (rcStrict != VINF_SUCCESS)
3673 return rcStrict;
3674
3675 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3676#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3677 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3678 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3679 fEfl |= UINT16_C(0xf000);
3680#endif
3681 pu16Frame[2] = (uint16_t)fEfl;
3682 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3683 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3684 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3685 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3686 return rcStrict;
3687
3688 /*
3689 * Load the vector address into cs:ip and make exception specific state
3690 * adjustments.
3691 */
3692 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3693 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3694 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3695 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3696 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3697 pVCpu->cpum.GstCtx.rip = Idte.off;
3698 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3699 IEMMISC_SET_EFL(pVCpu, fEfl);
3700
3701 /** @todo do we actually do this in real mode? */
3702 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3703 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3704
3705 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3706}
3707
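/*
 * Illustrative recap (added note) of the real-mode dispatch above: the IVT
 * entry for vector N is a 4-byte offset:segment pair at IDTR.base + N * 4
 * (IDTR.base is normally 0 in real mode), the 6-byte frame pushed is FLAGS,
 * CS, IP from higher to lower addresses, and the new CS base is simply the
 * selector shifted left by four:
 *
 * @code
 *  RTGCPTR GCPtrIdte = pVCpu->cpum.GstCtx.idtr.pIdt + 4 * u8Vector;  // IVT entry
 *  // New CS:IP = Idte.sel : Idte.off, with CS.u64Base = (uint32_t)Idte.sel << 4.
 * @endcode
 */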
3708
3709/**
3710 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3711 *
3712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3713 * @param pSReg Pointer to the segment register.
3714 */
3715IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
3716{
3717 pSReg->Sel = 0;
3718 pSReg->ValidSel = 0;
3719 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3720 {
3721 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes */
3722 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3723 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3724 }
3725 else
3726 {
3727 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3728 /** @todo check this on AMD-V */
3729 pSReg->u64Base = 0;
3730 pSReg->u32Limit = 0;
3731 }
3732}
3733
3734
3735/**
3736 * Loads a segment selector during a task switch in V8086 mode.
3737 *
3738 * @param pSReg Pointer to the segment register.
3739 * @param uSel The selector value to load.
3740 */
3741IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3742{
3743 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3744 pSReg->Sel = uSel;
3745 pSReg->ValidSel = uSel;
3746 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3747 pSReg->u64Base = uSel << 4;
3748 pSReg->u32Limit = 0xffff;
3749 pSReg->Attr.u = 0xf3;
3750}
3751
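/*
 * Illustrative example (added note) for the V8086 load above: a selector of
 * 0xb800 yields u64Base = 0xb800 << 4 = 0xb8000, a 64 KiB limit and attribute
 * byte 0xf3 (present, DPL=3, accessed read/write data), so the 16:16 address
 * B800:0010 resolves to linear 0xb8010.
 */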
3752
3753/**
3754 * Loads a NULL data selector into a selector register, both the hidden and
3755 * visible parts, in protected mode.
3756 *
3757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3758 * @param pSReg Pointer to the segment register.
3759 * @param uRpl The RPL.
3760 */
3761IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3762{
3763 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3764 * data selector in protected mode. */
3765 pSReg->Sel = uRpl;
3766 pSReg->ValidSel = uRpl;
3767 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3768 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3769 {
3770 /* VT-x (Intel 3960x) observed doing something like this. */
3771 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3772 pSReg->u32Limit = UINT32_MAX;
3773 pSReg->u64Base = 0;
3774 }
3775 else
3776 {
3777 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3778 pSReg->u32Limit = 0;
3779 pSReg->u64Base = 0;
3780 }
3781}
3782
3783
3784/**
3785 * Loads a segment selector during a task switch in protected mode.
3786 *
3787 * In this task switch scenario, we would throw \#TS exceptions rather than
3788 * \#GPs.
3789 *
3790 * @returns VBox strict status code.
3791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3792 * @param pSReg Pointer to the segment register.
3793 * @param uSel The new selector value.
3794 *
3795 * @remarks This does _not_ handle CS or SS.
3796 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3797 */
3798IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3799{
3800 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3801
3802 /* Null data selector. */
3803 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3804 {
3805 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3807 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3808 return VINF_SUCCESS;
3809 }
3810
3811 /* Fetch the descriptor. */
3812 IEMSELDESC Desc;
3813 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3814 if (rcStrict != VINF_SUCCESS)
3815 {
3816 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3817 VBOXSTRICTRC_VAL(rcStrict)));
3818 return rcStrict;
3819 }
3820
3821 /* Must be a data segment or readable code segment. */
3822 if ( !Desc.Legacy.Gen.u1DescType
3823 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3824 {
3825 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3826 Desc.Legacy.Gen.u4Type));
3827 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3828 }
3829
3830 /* Check privileges for data segments and non-conforming code segments. */
3831 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3832 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3833 {
3834 /* The RPL and the new CPL must be less than or equal to the DPL. */
3835 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3836 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3837 {
3838 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3839 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3840 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3841 }
3842 }
3843
3844 /* Is it there? */
3845 if (!Desc.Legacy.Gen.u1Present)
3846 {
3847 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3848 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3849 }
3850
3851 /* The base and limit. */
3852 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3853 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3854
3855 /*
3856 * Ok, everything checked out fine. Now set the accessed bit before
3857 * committing the result into the registers.
3858 */
3859 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3860 {
3861 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3862 if (rcStrict != VINF_SUCCESS)
3863 return rcStrict;
3864 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3865 }
3866
3867 /* Commit */
3868 pSReg->Sel = uSel;
3869 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3870 pSReg->u32Limit = cbLimit;
3871 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3872 pSReg->ValidSel = uSel;
3873 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3874 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3875 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3876
3877 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3878 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3879 return VINF_SUCCESS;
3880}
3881
3882
3883/**
3884 * Performs a task switch.
3885 *
3886 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3887 * caller is responsible for performing the necessary checks (like DPL, TSS
3888 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3889 * reference for JMP, CALL, IRET.
3890 *
3891 * If the task switch is due to a software interrupt or hardware exception,
3892 * the caller is responsible for validating the TSS selector and descriptor. See
3893 * Intel Instruction reference for INT n.
3894 *
3895 * @returns VBox strict status code.
3896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3897 * @param enmTaskSwitch The cause of the task switch.
3898 * @param uNextEip The EIP effective after the task switch.
3899 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3900 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3901 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3902 * @param SelTSS The TSS selector of the new task.
3903 * @param pNewDescTSS Pointer to the new TSS descriptor.
3904 */
3905IEM_STATIC VBOXSTRICTRC
3906iemTaskSwitch(PVMCPUCC pVCpu,
3907 IEMTASKSWITCH enmTaskSwitch,
3908 uint32_t uNextEip,
3909 uint32_t fFlags,
3910 uint16_t uErr,
3911 uint64_t uCr2,
3912 RTSEL SelTSS,
3913 PIEMSELDESC pNewDescTSS)
3914{
3915 Assert(!IEM_IS_REAL_MODE(pVCpu));
3916 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3917 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3918
3919 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3920 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3921 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3922 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3923 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3924
3925 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3926 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3927
3928 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3929 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3930
3931 /* Update CR2 in case it's a page-fault. */
3932 /** @todo This should probably be done much earlier in IEM/PGM. See
3933 * @bugref{5653#c49}. */
3934 if (fFlags & IEM_XCPT_FLAGS_CR2)
3935 pVCpu->cpum.GstCtx.cr2 = uCr2;
3936
3937 /*
3938 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3939 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3940 */
3941 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3942 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3943 if (uNewTSSLimit < uNewTSSLimitMin)
3944 {
3945 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3946 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3947 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3948 }
3949
3950 /*
3951 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
3952 * The new TSS must have been read and validated (DPL, limits etc.) before a
3953 * task-switch VM-exit commences.
3954 *
3955 * See Intel spec. 25.4.2 "Treatment of Task Switches".
3956 */
3957 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3958 {
3959 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
3960 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
3961 }
3962
3963 /*
3964 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3965 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3966 */
3967 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3968 {
3969 uint32_t const uExitInfo1 = SelTSS;
3970 uint32_t uExitInfo2 = uErr;
3971 switch (enmTaskSwitch)
3972 {
3973 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3974 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3975 default: break;
3976 }
3977 if (fFlags & IEM_XCPT_FLAGS_ERR)
3978 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3979 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
3980 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3981
3982 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3983 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3984 RT_NOREF2(uExitInfo1, uExitInfo2);
3985 }
3986
3987 /*
3988 * Check the current TSS limit. The last written byte to the current TSS during the
3989 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3990 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3991 *
3992 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3993 * end up with smaller than "legal" TSS limits.
3994 */
3995 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
3996 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3997 if (uCurTSSLimit < uCurTSSLimitMin)
3998 {
3999 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4000 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4001 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4002 }
4003
4004 /*
4005 * Verify that the new TSS can be accessed and map it. Map only the required contents
4006 * and not the entire TSS.
4007 */
4008 void *pvNewTSS;
4009 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
4010 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4011 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4012 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4013 * not perform correct translation if this happens. See Intel spec. 7.2.1
4014 * "Task-State Segment". */
4015 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4016 if (rcStrict != VINF_SUCCESS)
4017 {
4018 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4019 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4020 return rcStrict;
4021 }
4022
4023 /*
4024 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4025 */
4026 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4027 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4028 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4029 {
4030 PX86DESC pDescCurTSS;
4031 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4032 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4033 if (rcStrict != VINF_SUCCESS)
4034 {
4035 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4036 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4037 return rcStrict;
4038 }
4039
4040 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4041 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4042 if (rcStrict != VINF_SUCCESS)
4043 {
4044 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4045 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4046 return rcStrict;
4047 }
4048
4049 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4050 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4051 {
4052 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4053 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4054 u32EFlags &= ~X86_EFL_NT;
4055 }
4056 }
4057
4058 /*
4059 * Save the CPU state into the current TSS.
4060 */
4061 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4062 if (GCPtrNewTSS == GCPtrCurTSS)
4063 {
4064 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4065 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4066 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4067 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4068 pVCpu->cpum.GstCtx.ldtr.Sel));
4069 }
4070 if (fIsNewTSS386)
4071 {
4072 /*
4073 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4074 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4075 */
4076 void *pvCurTSS32;
4077 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4078 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4079 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4080 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4081 if (rcStrict != VINF_SUCCESS)
4082 {
4083 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4084 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4085 return rcStrict;
4086 }
4087
4088 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4089 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4090 pCurTSS32->eip = uNextEip;
4091 pCurTSS32->eflags = u32EFlags;
4092 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4093 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4094 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4095 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4096 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4097 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4098 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4099 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4100 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4101 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4102 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4103 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4104 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4105 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4106
4107 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4108 if (rcStrict != VINF_SUCCESS)
4109 {
4110 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4111 VBOXSTRICTRC_VAL(rcStrict)));
4112 return rcStrict;
4113 }
4114 }
4115 else
4116 {
4117 /*
4118 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4119 */
4120 void *pvCurTSS16;
4121 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4122 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4123 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4124 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4125 if (rcStrict != VINF_SUCCESS)
4126 {
4127 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4128 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4129 return rcStrict;
4130 }
4131
4132 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS..offCurTSS + cbCurTSS). */
4133 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4134 pCurTSS16->ip = uNextEip;
4135 pCurTSS16->flags = u32EFlags;
4136 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4137 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4138 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4139 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4140 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4141 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4142 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4143 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4144 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4145 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4146 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4147 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4148
4149 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4150 if (rcStrict != VINF_SUCCESS)
4151 {
4152 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4153 VBOXSTRICTRC_VAL(rcStrict)));
4154 return rcStrict;
4155 }
4156 }
4157
4158 /*
4159 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4160 */
4161 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4162 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4163 {
4164 /* 16-bit or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4165 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4166 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4167 }
4168
4169 /*
4170 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4171 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4172 */
4173 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4174 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4175 bool fNewDebugTrap;
4176 if (fIsNewTSS386)
4177 {
4178 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
4179 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4180 uNewEip = pNewTSS32->eip;
4181 uNewEflags = pNewTSS32->eflags;
4182 uNewEax = pNewTSS32->eax;
4183 uNewEcx = pNewTSS32->ecx;
4184 uNewEdx = pNewTSS32->edx;
4185 uNewEbx = pNewTSS32->ebx;
4186 uNewEsp = pNewTSS32->esp;
4187 uNewEbp = pNewTSS32->ebp;
4188 uNewEsi = pNewTSS32->esi;
4189 uNewEdi = pNewTSS32->edi;
4190 uNewES = pNewTSS32->es;
4191 uNewCS = pNewTSS32->cs;
4192 uNewSS = pNewTSS32->ss;
4193 uNewDS = pNewTSS32->ds;
4194 uNewFS = pNewTSS32->fs;
4195 uNewGS = pNewTSS32->gs;
4196 uNewLdt = pNewTSS32->selLdt;
4197 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4198 }
4199 else
4200 {
4201 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
4202 uNewCr3 = 0;
4203 uNewEip = pNewTSS16->ip;
4204 uNewEflags = pNewTSS16->flags;
4205 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4206 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4207 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4208 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4209 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4210 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4211 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4212 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4213 uNewES = pNewTSS16->es;
4214 uNewCS = pNewTSS16->cs;
4215 uNewSS = pNewTSS16->ss;
4216 uNewDS = pNewTSS16->ds;
4217 uNewFS = 0;
4218 uNewGS = 0;
4219 uNewLdt = pNewTSS16->selLdt;
4220 fNewDebugTrap = false;
4221 }
4222
4223 if (GCPtrNewTSS == GCPtrCurTSS)
4224 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4225 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4226
4227 /*
4228 * We're done accessing the new TSS.
4229 */
4230 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4231 if (rcStrict != VINF_SUCCESS)
4232 {
4233 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4234 return rcStrict;
4235 }
4236
4237 /*
4238 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4239 */
4240 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4241 {
4242 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4243 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4244 if (rcStrict != VINF_SUCCESS)
4245 {
4246 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4247 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4248 return rcStrict;
4249 }
4250
4251 /* Check that the descriptor indicates the new TSS is available (not busy). */
4252 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4253 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4254 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4255
4256 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4257 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4258 if (rcStrict != VINF_SUCCESS)
4259 {
4260 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4261 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4262 return rcStrict;
4263 }
4264 }
4265
4266 /*
4267 * From this point on, we're technically in the new task. We will defer exceptions
4268 * until the completion of the task switch but before executing any instructions in the new task.
4269 */
4270 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4271 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4272 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4273 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4274 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4275 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4276 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4277
4278 /* Set the busy bit in TR. */
4279 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4280
4281 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4282 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4283 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4284 {
4285 uNewEflags |= X86_EFL_NT;
4286 }
4287
4288 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4289 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4290 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4291
4292 pVCpu->cpum.GstCtx.eip = uNewEip;
4293 pVCpu->cpum.GstCtx.eax = uNewEax;
4294 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4295 pVCpu->cpum.GstCtx.edx = uNewEdx;
4296 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4297 pVCpu->cpum.GstCtx.esp = uNewEsp;
4298 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4299 pVCpu->cpum.GstCtx.esi = uNewEsi;
4300 pVCpu->cpum.GstCtx.edi = uNewEdi;
4301
4302 uNewEflags &= X86_EFL_LIVE_MASK;
4303 uNewEflags |= X86_EFL_RA1_MASK;
4304 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4305
4306 /*
4307 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4308 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4309 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4310 */
4311 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4312 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4313
4314 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4315 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4316
4317 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4318 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4319
4320 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4321 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4322
4323 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4324 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4325
4326 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4327 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4328 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4329
4330 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4331 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4332 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4333 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4334
4335 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4336 {
4337 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4338 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4339 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4340 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4341 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4342 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4343 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4344 }
4345
4346 /*
4347 * Switch CR3 for the new task.
4348 */
4349 if ( fIsNewTSS386
4350 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4351 {
4352 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4353 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4354 AssertRCSuccessReturn(rc, rc);
4355
4356 /* Inform PGM. */
4357 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4358 AssertRCReturn(rc, rc);
4359 /* ignore informational status codes */
4360
4361 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4362 }
4363
4364 /*
4365 * Switch LDTR for the new task.
4366 */
4367 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4368 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4369 else
4370 {
4371 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4372
4373 IEMSELDESC DescNewLdt;
4374 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4375 if (rcStrict != VINF_SUCCESS)
4376 {
4377 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4378 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4379 return rcStrict;
4380 }
4381 if ( !DescNewLdt.Legacy.Gen.u1Present
4382 || DescNewLdt.Legacy.Gen.u1DescType
4383 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4384 {
4385 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4386 uNewLdt, DescNewLdt.Legacy.u));
4387 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4388 }
4389
4390 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4391 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4392 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4393 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4394 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4395 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4396 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4397 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4398 }
4399
4400 IEMSELDESC DescSS;
4401 if (IEM_IS_V86_MODE(pVCpu))
4402 {
4403 pVCpu->iem.s.uCpl = 3;
4404 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4405 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4406 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4407 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4408 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4409 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4410
4411 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
4412 DescSS.Legacy.u = 0;
4413 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4414 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4415 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4416 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4417 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4418 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4419 DescSS.Legacy.Gen.u2Dpl = 3;
4420 }
4421 else
4422 {
4423 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
4424
4425 /*
4426 * Load the stack segment for the new task.
4427 */
4428 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4429 {
4430 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4431 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4432 }
4433
4434 /* Fetch the descriptor. */
4435 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4436 if (rcStrict != VINF_SUCCESS)
4437 {
4438 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4439 VBOXSTRICTRC_VAL(rcStrict)));
4440 return rcStrict;
4441 }
4442
4443 /* SS must be a data segment and writable. */
4444 if ( !DescSS.Legacy.Gen.u1DescType
4445 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4446 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4447 {
4448 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4449 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4450 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4451 }
4452
4453 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4454 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4455 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4456 {
4457 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4458 uNewCpl));
4459 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4460 }
4461
4462 /* Is it there? */
4463 if (!DescSS.Legacy.Gen.u1Present)
4464 {
4465 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4466 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4467 }
4468
4469 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4470 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4471
4472 /* Set the accessed bit before committing the result into SS. */
4473 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4474 {
4475 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4476 if (rcStrict != VINF_SUCCESS)
4477 return rcStrict;
4478 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4479 }
4480
4481 /* Commit SS. */
4482 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4483 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4484 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4485 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4486 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4487 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4488 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4489
4490 /* CPL has changed, update IEM before loading rest of segments. */
4491 pVCpu->iem.s.uCpl = uNewCpl;
4492
4493 /*
4494 * Load the data segments for the new task.
4495 */
4496 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4497 if (rcStrict != VINF_SUCCESS)
4498 return rcStrict;
4499 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4500 if (rcStrict != VINF_SUCCESS)
4501 return rcStrict;
4502 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4503 if (rcStrict != VINF_SUCCESS)
4504 return rcStrict;
4505 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4506 if (rcStrict != VINF_SUCCESS)
4507 return rcStrict;
4508
4509 /*
4510 * Load the code segment for the new task.
4511 */
4512 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4513 {
4514 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4515 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4516 }
4517
4518 /* Fetch the descriptor. */
4519 IEMSELDESC DescCS;
4520 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4521 if (rcStrict != VINF_SUCCESS)
4522 {
4523 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4524 return rcStrict;
4525 }
4526
4527 /* CS must be a code segment. */
4528 if ( !DescCS.Legacy.Gen.u1DescType
4529 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4530 {
4531 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4532 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4533 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4534 }
4535
4536 /* For conforming CS, DPL must be less than or equal to the RPL. */
4537 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4538 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4539 {
4540 Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4541 DescCS.Legacy.Gen.u2Dpl));
4542 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4543 }
4544
4545 /* For non-conforming CS, DPL must match RPL. */
4546 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4547 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4548 {
4549        Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4550 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4551 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4552 }
4553
4554 /* Is it there? */
4555 if (!DescCS.Legacy.Gen.u1Present)
4556 {
4557 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4558 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4559 }
4560
4561 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4562 u64Base = X86DESC_BASE(&DescCS.Legacy);
4563
4564 /* Set the accessed bit before committing the result into CS. */
4565 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4566 {
4567 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4568 if (rcStrict != VINF_SUCCESS)
4569 return rcStrict;
4570 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4571 }
4572
4573 /* Commit CS. */
4574 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4575 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4576 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4577 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4578 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4579 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4580 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4581 }
4582
4583 /** @todo Debug trap. */
4584 if (fIsNewTSS386 && fNewDebugTrap)
4585 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4586
4587 /*
4588 * Construct the error code masks based on what caused this task switch.
4589 * See Intel Instruction reference for INT.
4590 */
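    /* uExt supplies the EXT bit for faults raised while finishing the task switch
       (#SS/#GP below): 1 for hardware interrupts/exceptions and ICEBP (INT1),
       0 for INT n style software interrupts and non-exception task switches. */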
4591 uint16_t uExt;
4592 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4593 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4594 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4595 {
4596 uExt = 1;
4597 }
4598 else
4599 uExt = 0;
4600
4601 /*
4602 * Push any error code on to the new stack.
4603 */
4604 if (fFlags & IEM_XCPT_FLAGS_ERR)
4605 {
4606 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4607 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4608 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4609
4610 /* Check that there is sufficient space on the stack. */
4611 /** @todo Factor out segment limit checking for normal/expand down segments
4612 * into a separate function. */
4613 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4614 {
4615 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4616 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4617 {
4618 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4619 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4620 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4621 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4622 }
4623 }
4624 else
4625 {
4626 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4627 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4628 {
4629 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4630 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4631 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4632 }
4633 }
4634
4635
4636 if (fIsNewTSS386)
4637 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4638 else
4639 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4640 if (rcStrict != VINF_SUCCESS)
4641 {
4642 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4643 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4644 return rcStrict;
4645 }
4646 }
4647
4648 /* Check the new EIP against the new CS limit. */
4649 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4650 {
4651        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4652 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4653 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4654 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4655 }
4656
4657 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4658 pVCpu->cpum.GstCtx.ss.Sel));
4659 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4660}
4661
4662
4663/**
4664 * Implements exceptions and interrupts for protected mode.
4665 *
4666 * @returns VBox strict status code.
4667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4668 * @param cbInstr The number of bytes to offset rIP by in the return
4669 * address.
4670 * @param u8Vector The interrupt / exception vector number.
4671 * @param fFlags The flags.
4672 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4673 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4674 */
4675IEM_STATIC VBOXSTRICTRC
4676iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
4677 uint8_t cbInstr,
4678 uint8_t u8Vector,
4679 uint32_t fFlags,
4680 uint16_t uErr,
4681 uint64_t uCr2)
4682{
4683 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4684
4685 /*
4686 * Read the IDT entry.
4687 */
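    /* Protected-mode IDT entries are 8 bytes each; the whole gate for this vector must fit within the IDT limit. */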
4688 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4689 {
4690 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4691 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4692 }
4693 X86DESC Idte;
4694 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4695 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4696 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4697 {
4698 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4699 return rcStrict;
4700 }
4701 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4702 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4703 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4704
4705 /*
4706 * Check the descriptor type, DPL and such.
4707 * ASSUMES this is done in the same order as described for call-gate calls.
4708 */
4709 if (Idte.Gate.u1DescType)
4710 {
4711 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4712 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4713 }
4714 bool fTaskGate = false;
4715 uint8_t f32BitGate = true;
4716 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4717 switch (Idte.Gate.u4Type)
4718 {
4719 case X86_SEL_TYPE_SYS_UNDEFINED:
4720 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4721 case X86_SEL_TYPE_SYS_LDT:
4722 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4723 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4724 case X86_SEL_TYPE_SYS_UNDEFINED2:
4725 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4726 case X86_SEL_TYPE_SYS_UNDEFINED3:
4727 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4728 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4729 case X86_SEL_TYPE_SYS_UNDEFINED4:
4730 {
4731 /** @todo check what actually happens when the type is wrong...
4732 * esp. call gates. */
4733 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4734 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4735 }
4736
4737 case X86_SEL_TYPE_SYS_286_INT_GATE:
4738 f32BitGate = false;
4739 RT_FALL_THRU();
4740 case X86_SEL_TYPE_SYS_386_INT_GATE:
4741 fEflToClear |= X86_EFL_IF;
4742 break;
4743
4744 case X86_SEL_TYPE_SYS_TASK_GATE:
4745 fTaskGate = true;
4746#ifndef IEM_IMPLEMENTS_TASKSWITCH
4747 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4748#endif
4749 break;
4750
4751 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4752            f32BitGate = false;
                RT_FALL_THRU();
4753 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4754 break;
4755
4756 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4757 }
4758
4759 /* Check DPL against CPL if applicable. */
4760 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4761 {
4762 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4763 {
4764 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4765 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4766 }
4767 }
4768
4769 /* Is it there? */
4770 if (!Idte.Gate.u1Present)
4771 {
4772 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4773 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4774 }
4775
4776 /* Is it a task-gate? */
4777 if (fTaskGate)
4778 {
4779 /*
4780 * Construct the error code masks based on what caused this task switch.
4781 * See Intel Instruction reference for INT.
4782 */
4783 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4784 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4785 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4786 RTSEL SelTSS = Idte.Gate.u16Sel;
4787
4788 /*
4789 * Fetch the TSS descriptor in the GDT.
4790 */
4791 IEMSELDESC DescTSS;
4792 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4793 if (rcStrict != VINF_SUCCESS)
4794 {
4795 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4796 VBOXSTRICTRC_VAL(rcStrict)));
4797 return rcStrict;
4798 }
4799
4800 /* The TSS descriptor must be a system segment and be available (not busy). */
4801 if ( DescTSS.Legacy.Gen.u1DescType
4802 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4803 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4804 {
4805 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4806 u8Vector, SelTSS, DescTSS.Legacy.au64));
4807 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4808 }
4809
4810 /* The TSS must be present. */
4811 if (!DescTSS.Legacy.Gen.u1Present)
4812 {
4813 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4814 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4815 }
4816
4817 /* Do the actual task switch. */
4818 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4819 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4820 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4821 }
4822
4823 /* A null CS is bad. */
4824 RTSEL NewCS = Idte.Gate.u16Sel;
4825 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4826 {
4827 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4828 return iemRaiseGeneralProtectionFault0(pVCpu);
4829 }
4830
4831 /* Fetch the descriptor for the new CS. */
4832 IEMSELDESC DescCS;
4833 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4834 if (rcStrict != VINF_SUCCESS)
4835 {
4836 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4837 return rcStrict;
4838 }
4839
4840 /* Must be a code segment. */
4841 if (!DescCS.Legacy.Gen.u1DescType)
4842 {
4843 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4844 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4845 }
4846 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4847 {
4848 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4849 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4850 }
4851
4852 /* Don't allow lowering the privilege level. */
4853 /** @todo Does the lowering of privileges apply to software interrupts
4854 * only? This has bearings on the more-privileged or
4855 * same-privilege stack behavior further down. A testcase would
4856 * be nice. */
4857 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4858 {
4859 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4860 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4861 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4862 }
4863
4864 /* Make sure the selector is present. */
4865 if (!DescCS.Legacy.Gen.u1Present)
4866 {
4867 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4868 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4869 }
4870
4871 /* Check the new EIP against the new CS limit. */
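    /* 286 gates only carry a 16-bit offset; 386 gates combine the low and high offset words. */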
4872 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4873 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4874 ? Idte.Gate.u16OffsetLow
4875 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4876 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4877 if (uNewEip > cbLimitCS)
4878 {
4879 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4880 u8Vector, uNewEip, cbLimitCS, NewCS));
4881 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4882 }
4883 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4884
4885 /* Calc the flag image to push. */
4886 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4887 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4888 fEfl &= ~X86_EFL_RF;
4889 else
4890 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4891
4892 /* From V8086 mode only go to CPL 0. */
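    /* A conforming CS keeps the current CPL; for a non-conforming CS the handler runs at CS.DPL. */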
4893 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4894 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4895 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4896 {
4897 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4898 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4899 }
4900
4901 /*
4902 * If the privilege level changes, we need to get a new stack from the TSS.
4903     * This in turn means validating the new SS and ESP...
4904 */
4905 if (uNewCpl != pVCpu->iem.s.uCpl)
4906 {
4907 RTSEL NewSS;
4908 uint32_t uNewEsp;
4909 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4910 if (rcStrict != VINF_SUCCESS)
4911 return rcStrict;
4912
4913 IEMSELDESC DescSS;
4914 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4915 if (rcStrict != VINF_SUCCESS)
4916 return rcStrict;
4917 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4918 if (!DescSS.Legacy.Gen.u1DefBig)
4919 {
4920 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4921 uNewEsp = (uint16_t)uNewEsp;
4922 }
4923
4924 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4925
4926 /* Check that there is sufficient space for the stack frame. */
4927 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
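        /* Frame layout: [error code,] EIP, CS, EFLAGS, ESP and SS (5 or 6 entries), plus
           ES, DS, FS and GS when interrupting V8086 code; each entry is 2 or 4 bytes wide
           depending on the gate size. */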
4928 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4929 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4930 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
4931
4932 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4933 {
4934 if ( uNewEsp - 1 > cbLimitSS
4935 || uNewEsp < cbStackFrame)
4936 {
4937 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4938 u8Vector, NewSS, uNewEsp, cbStackFrame));
4939 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4940 }
4941 }
4942 else
4943 {
4944 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4945 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4946 {
4947 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4948 u8Vector, NewSS, uNewEsp, cbStackFrame));
4949 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4950 }
4951 }
4952
4953 /*
4954 * Start making changes.
4955 */
4956
4957 /* Set the new CPL so that stack accesses use it. */
4958 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4959 pVCpu->iem.s.uCpl = uNewCpl;
4960
4961 /* Create the stack frame. */
4962 RTPTRUNION uStackFrame;
4963 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4964 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4965 if (rcStrict != VINF_SUCCESS)
4966 return rcStrict;
4967 void * const pvStackFrame = uStackFrame.pv;
4968 if (f32BitGate)
4969 {
4970 if (fFlags & IEM_XCPT_FLAGS_ERR)
4971 *uStackFrame.pu32++ = uErr;
4972 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4973 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4974 uStackFrame.pu32[2] = fEfl;
4975 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4976 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4977 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4978 if (fEfl & X86_EFL_VM)
4979 {
4980 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4981 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4982 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4983 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
4984 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
4985 }
4986 }
4987 else
4988 {
4989 if (fFlags & IEM_XCPT_FLAGS_ERR)
4990 *uStackFrame.pu16++ = uErr;
4991 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
4992 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4993 uStackFrame.pu16[2] = fEfl;
4994 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
4995 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
4996 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
4997 if (fEfl & X86_EFL_VM)
4998 {
4999 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5000 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5001 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5002 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5003 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5004 }
5005 }
5006 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5007 if (rcStrict != VINF_SUCCESS)
5008 return rcStrict;
5009
5010 /* Mark the selectors 'accessed' (hope this is the correct time). */
5011        /** @todo testcase: exactly _when_ are the accessed bits set - before or
5012 * after pushing the stack frame? (Write protect the gdt + stack to
5013 * find out.) */
5014 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5015 {
5016 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5017 if (rcStrict != VINF_SUCCESS)
5018 return rcStrict;
5019 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5020 }
5021
5022 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5023 {
5024 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5025 if (rcStrict != VINF_SUCCESS)
5026 return rcStrict;
5027 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5028 }
5029
5030 /*
5031         * Start committing the register changes (joins with the DPL=CPL branch).
5032 */
5033 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5034 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5035 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5036 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5037 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5038 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5039 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5040 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5041 * SP is loaded).
5042 * Need to check the other combinations too:
5043 * - 16-bit TSS, 32-bit handler
5044 * - 32-bit TSS, 16-bit handler */
5045 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5046 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5047 else
5048 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5049
5050 if (fEfl & X86_EFL_VM)
5051 {
5052 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5053 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5054 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5055 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5056 }
5057 }
5058 /*
5059 * Same privilege, no stack change and smaller stack frame.
5060 */
5061 else
5062 {
5063 uint64_t uNewRsp;
5064 RTPTRUNION uStackFrame;
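        /* Same-privilege frame: [error code,] EIP, CS and EFLAGS - 3 or 4 entries of 2 or 4 bytes. */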
5065 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5066 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5067 if (rcStrict != VINF_SUCCESS)
5068 return rcStrict;
5069 void * const pvStackFrame = uStackFrame.pv;
5070
5071 if (f32BitGate)
5072 {
5073 if (fFlags & IEM_XCPT_FLAGS_ERR)
5074 *uStackFrame.pu32++ = uErr;
5075 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5076 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5077 uStackFrame.pu32[2] = fEfl;
5078 }
5079 else
5080 {
5081 if (fFlags & IEM_XCPT_FLAGS_ERR)
5082 *uStackFrame.pu16++ = uErr;
5083 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5084 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5085 uStackFrame.pu16[2] = fEfl;
5086 }
5087 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5088 if (rcStrict != VINF_SUCCESS)
5089 return rcStrict;
5090
5091 /* Mark the CS selector as 'accessed'. */
5092 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5093 {
5094 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5095 if (rcStrict != VINF_SUCCESS)
5096 return rcStrict;
5097 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5098 }
5099
5100 /*
5101 * Start committing the register changes (joins with the other branch).
5102 */
5103 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5104 }
5105
5106 /* ... register committing continues. */
5107 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5108 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5109 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5110 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5111 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5112 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5113
5114 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5115 fEfl &= ~fEflToClear;
5116 IEMMISC_SET_EFL(pVCpu, fEfl);
5117
5118 if (fFlags & IEM_XCPT_FLAGS_CR2)
5119 pVCpu->cpum.GstCtx.cr2 = uCr2;
5120
5121 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5122 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5123
5124 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5125}
5126
5127
5128/**
5129 * Implements exceptions and interrupts for long mode.
5130 *
5131 * @returns VBox strict status code.
5132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5133 * @param cbInstr The number of bytes to offset rIP by in the return
5134 * address.
5135 * @param u8Vector The interrupt / exception vector number.
5136 * @param fFlags The flags.
5137 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5138 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5139 */
5140IEM_STATIC VBOXSTRICTRC
5141iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
5142 uint8_t cbInstr,
5143 uint8_t u8Vector,
5144 uint32_t fFlags,
5145 uint16_t uErr,
5146 uint64_t uCr2)
5147{
5148 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5149
5150 /*
5151 * Read the IDT entry.
5152 */
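    /* Long-mode IDT entries are 16 bytes each; the two 8-byte halves are fetched separately below. */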
5153 uint16_t offIdt = (uint16_t)u8Vector << 4;
5154 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5155 {
5156 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5157 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5158 }
5159 X86DESC64 Idte;
5160#ifdef _MSC_VER /* Shut up silly compiler warning. */
5161 Idte.au64[0] = 0;
5162 Idte.au64[1] = 0;
5163#endif
5164 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5165 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5166 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5167 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5168 {
5169 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5170 return rcStrict;
5171 }
5172 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5173 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5174 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5175
5176 /*
5177 * Check the descriptor type, DPL and such.
5178 * ASSUMES this is done in the same order as described for call-gate calls.
5179 */
5180 if (Idte.Gate.u1DescType)
5181 {
5182 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5183 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5184 }
5185 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5186 switch (Idte.Gate.u4Type)
5187 {
5188 case AMD64_SEL_TYPE_SYS_INT_GATE:
5189 fEflToClear |= X86_EFL_IF;
5190 break;
5191 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5192 break;
5193
5194 default:
5195 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5196 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5197 }
5198
5199 /* Check DPL against CPL if applicable. */
5200 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5201 {
5202 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5203 {
5204 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5205 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5206 }
5207 }
5208
5209 /* Is it there? */
5210 if (!Idte.Gate.u1Present)
5211 {
5212 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5213 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5214 }
5215
5216 /* A null CS is bad. */
5217 RTSEL NewCS = Idte.Gate.u16Sel;
5218 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5219 {
5220 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5221 return iemRaiseGeneralProtectionFault0(pVCpu);
5222 }
5223
5224 /* Fetch the descriptor for the new CS. */
5225 IEMSELDESC DescCS;
5226 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5227 if (rcStrict != VINF_SUCCESS)
5228 {
5229 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5230 return rcStrict;
5231 }
5232
5233 /* Must be a 64-bit code segment. */
5234 if (!DescCS.Long.Gen.u1DescType)
5235 {
5236 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5237 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5238 }
5239 if ( !DescCS.Long.Gen.u1Long
5240 || DescCS.Long.Gen.u1DefBig
5241 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5242 {
5243 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5244 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5245 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5246 }
5247
5248 /* Don't allow lowering the privilege level. For non-conforming CS
5249 selectors, the CS.DPL sets the privilege level the trap/interrupt
5250 handler runs at. For conforming CS selectors, the CPL remains
5251 unchanged, but the CS.DPL must be <= CPL. */
5252 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5253 * when CPU in Ring-0. Result \#GP? */
5254 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5255 {
5256 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5257 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5258 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5259 }
5260
5261
5262 /* Make sure the selector is present. */
5263 if (!DescCS.Legacy.Gen.u1Present)
5264 {
5265 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5266 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5267 }
5268
5269 /* Check that the new RIP is canonical. */
5270 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5271 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5272 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5273 if (!IEM_IS_CANONICAL(uNewRip))
5274 {
5275 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5276 return iemRaiseGeneralProtectionFault0(pVCpu);
5277 }
5278
5279 /*
5280 * If the privilege level changes or if the IST isn't zero, we need to get
5281 * a new stack from the TSS.
5282 */
5283 uint64_t uNewRsp;
5284 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5285 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5286 if ( uNewCpl != pVCpu->iem.s.uCpl
5287 || Idte.Gate.u3IST != 0)
5288 {
5289 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5290 if (rcStrict != VINF_SUCCESS)
5291 return rcStrict;
5292 }
5293 else
5294 uNewRsp = pVCpu->cpum.GstCtx.rsp;
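    /* 64-bit interrupt delivery aligns the new stack pointer down to a 16-byte boundary before pushing the frame. */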
5295 uNewRsp &= ~(uint64_t)0xf;
5296
5297 /*
5298 * Calc the flag image to push.
5299 */
5300 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5301 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5302 fEfl &= ~X86_EFL_RF;
5303 else
5304 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5305
5306 /*
5307 * Start making changes.
5308 */
5309 /* Set the new CPL so that stack accesses use it. */
5310 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5311 pVCpu->iem.s.uCpl = uNewCpl;
5312
5313 /* Create the stack frame. */
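    /* The 64-bit frame always holds SS, RSP, RFLAGS, CS and RIP (5 qwords), plus an optional error code. */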
5314 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5315 RTPTRUNION uStackFrame;
5316 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5317 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5318 if (rcStrict != VINF_SUCCESS)
5319 return rcStrict;
5320 void * const pvStackFrame = uStackFrame.pv;
5321
5322 if (fFlags & IEM_XCPT_FLAGS_ERR)
5323 *uStackFrame.pu64++ = uErr;
5324 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5325 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5326 uStackFrame.pu64[2] = fEfl;
5327 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5328 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5329 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5330 if (rcStrict != VINF_SUCCESS)
5331 return rcStrict;
5332
5333    /* Mark the CS selector as 'accessed' (hope this is the correct time). */
5334    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5335 * after pushing the stack frame? (Write protect the gdt + stack to
5336 * find out.) */
5337 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5338 {
5339 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5340 if (rcStrict != VINF_SUCCESS)
5341 return rcStrict;
5342 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5343 }
5344
5345 /*
5346     * Start committing the register changes.
5347 */
5348 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5349 * hidden registers when interrupting 32-bit or 16-bit code! */
5350 if (uNewCpl != uOldCpl)
5351 {
5352 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5353 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5354 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5355 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5356 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5357 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5358 }
5359 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5360 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5361 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5362 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5363 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5364 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5365 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5366 pVCpu->cpum.GstCtx.rip = uNewRip;
5367
5368 fEfl &= ~fEflToClear;
5369 IEMMISC_SET_EFL(pVCpu, fEfl);
5370
5371 if (fFlags & IEM_XCPT_FLAGS_CR2)
5372 pVCpu->cpum.GstCtx.cr2 = uCr2;
5373
5374 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5375 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5376
5377 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5378}
5379
5380
5381/**
5382 * Implements exceptions and interrupts.
5383 *
5384  * All exceptions and interrupts go through this function!
5385 *
5386 * @returns VBox strict status code.
5387 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5388 * @param cbInstr The number of bytes to offset rIP by in the return
5389 * address.
5390 * @param u8Vector The interrupt / exception vector number.
5391 * @param fFlags The flags.
5392 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5393 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5394 */
5395DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5396iemRaiseXcptOrInt(PVMCPUCC pVCpu,
5397 uint8_t cbInstr,
5398 uint8_t u8Vector,
5399 uint32_t fFlags,
5400 uint16_t uErr,
5401 uint64_t uCr2)
5402{
5403 /*
5404 * Get all the state that we might need here.
5405 */
5406 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5407 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5408
5409#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5410 /*
5411 * Flush prefetch buffer
5412 */
5413 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5414#endif
5415
5416 /*
5417 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5418 */
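    /* A software INT n executed in V8086 mode with IOPL < 3 is converted into a #GP(0)
       here rather than being dispatched through the IDT. */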
5419 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5420 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5421 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5422 | IEM_XCPT_FLAGS_BP_INSTR
5423 | IEM_XCPT_FLAGS_ICEBP_INSTR
5424 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5425 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5426 {
5427 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5428 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5429 u8Vector = X86_XCPT_GP;
5430 uErr = 0;
5431 }
5432#ifdef DBGFTRACE_ENABLED
5433 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5434 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5435 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5436#endif
5437
5438 /*
5439 * Evaluate whether NMI blocking should be in effect.
5440 * Normally, NMI blocking is in effect whenever we inject an NMI.
5441 */
5442 bool fBlockNmi;
5443 if ( u8Vector == X86_XCPT_NMI
5444 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5445 fBlockNmi = true;
5446 else
5447 fBlockNmi = false;
5448
5449#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5450 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5451 {
5452 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5453 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5454 return rcStrict0;
5455
5456 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5457 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5458 {
5459 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5460 fBlockNmi = false;
5461 }
5462 }
5463#endif
5464
5465#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5466 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5467 {
5468 /*
5469 * If the event is being injected as part of VMRUN, it isn't subject to event
5470 * intercepts in the nested-guest. However, secondary exceptions that occur
5471 * during injection of any event -are- subject to exception intercepts.
5472 *
5473 * See AMD spec. 15.20 "Event Injection".
5474 */
5475 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5476 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5477 else
5478 {
5479 /*
5480 * Check and handle if the event being raised is intercepted.
5481 */
5482 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5483 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5484 return rcStrict0;
5485 }
5486 }
5487#endif
5488
5489 /*
5490 * Set NMI blocking if necessary.
5491 */
5492 if ( fBlockNmi
5493 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5494 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5495
5496 /*
5497 * Do recursion accounting.
5498 */
5499 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5500 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5501 if (pVCpu->iem.s.cXcptRecursions == 0)
5502 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5503 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5504 else
5505 {
5506 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5507 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5508 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5509
5510 if (pVCpu->iem.s.cXcptRecursions >= 4)
5511 {
5512#ifdef DEBUG_bird
5513 AssertFailed();
5514#endif
5515 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5516 }
5517
5518 /*
5519 * Evaluate the sequence of recurring events.
5520 */
5521 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5522 NULL /* pXcptRaiseInfo */);
5523 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5524 { /* likely */ }
5525 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5526 {
5527 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5528 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5529 u8Vector = X86_XCPT_DF;
5530 uErr = 0;
5531#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5532 /* VMX nested-guest #DF intercept needs to be checked here. */
5533 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5534 {
5535 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5536 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5537 return rcStrict0;
5538 }
5539#endif
5540 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5541 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5542 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5543 }
5544 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5545 {
5546 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5547 return iemInitiateCpuShutdown(pVCpu);
5548 }
5549 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5550 {
5551 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5552 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5553 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5554 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5555 return VERR_EM_GUEST_CPU_HANG;
5556 }
5557 else
5558 {
5559 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5560 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5561 return VERR_IEM_IPE_9;
5562 }
5563
5564 /*
5565         * The 'EXT' bit is set when an exception occurs during delivery of an external
5566         * event (such as an interrupt or an earlier exception)[1]. The privileged software
5567         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5568         * interrupts and the INTO and INT3 instructions, the 'EXT' bit will not be set[3].
5569 *
5570 * [1] - Intel spec. 6.13 "Error Code"
5571 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5572 * [3] - Intel Instruction reference for INT n.
5573 */
5574 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5575 && (fFlags & IEM_XCPT_FLAGS_ERR)
5576 && u8Vector != X86_XCPT_PF
5577 && u8Vector != X86_XCPT_DF)
5578 {
5579 uErr |= X86_TRAP_ERR_EXTERNAL;
5580 }
5581 }
5582
5583 pVCpu->iem.s.cXcptRecursions++;
5584 pVCpu->iem.s.uCurXcpt = u8Vector;
5585 pVCpu->iem.s.fCurXcpt = fFlags;
5586 pVCpu->iem.s.uCurXcptErr = uErr;
5587 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5588
5589 /*
5590 * Extensive logging.
5591 */
5592#if defined(LOG_ENABLED) && defined(IN_RING3)
5593 if (LogIs3Enabled())
5594 {
5595 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5596 PVM pVM = pVCpu->CTX_SUFF(pVM);
5597 char szRegs[4096];
5598 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5599 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5600 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5601 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5602 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5603 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5604 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5605 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5606 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5607 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5608 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5609 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5610 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5611 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5612 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5613 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5614 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5615 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5616 " efer=%016VR{efer}\n"
5617 " pat=%016VR{pat}\n"
5618 " sf_mask=%016VR{sf_mask}\n"
5619 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5620 " lstar=%016VR{lstar}\n"
5621 " star=%016VR{star} cstar=%016VR{cstar}\n"
5622 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5623 );
5624
5625 char szInstr[256];
5626 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5627 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5628 szInstr, sizeof(szInstr), NULL);
5629 Log3(("%s%s\n", szRegs, szInstr));
5630 }
5631#endif /* LOG_ENABLED */
5632
5633 /*
5634 * Call the mode specific worker function.
5635 */
5636 VBOXSTRICTRC rcStrict;
5637 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5638 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5639 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5640 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5641 else
5642 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5643
5644 /* Flush the prefetch buffer. */
5645#ifdef IEM_WITH_CODE_TLB
5646 pVCpu->iem.s.pbInstrBuf = NULL;
5647#else
5648 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5649#endif
5650
5651 /*
5652 * Unwind.
5653 */
5654 pVCpu->iem.s.cXcptRecursions--;
5655 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5656 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5657 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5658 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5659 pVCpu->iem.s.cXcptRecursions + 1));
5660 return rcStrict;
5661}
5662
5663#ifdef IEM_WITH_SETJMP
5664/**
5665 * See iemRaiseXcptOrInt. Will not return.
5666 */
5667IEM_STATIC DECL_NO_RETURN(void)
5668iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
5669 uint8_t cbInstr,
5670 uint8_t u8Vector,
5671 uint32_t fFlags,
5672 uint16_t uErr,
5673 uint64_t uCr2)
5674{
5675 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5676 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5677}
5678#endif
5679
5680
5681/** \#DE - 00. */
5682DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPUCC pVCpu)
5683{
5684 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5685}
5686
5687
5688/** \#DB - 01.
5689 * @note This automatically clears DR7.GD. */
5690DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPUCC pVCpu)
5691{
5692 /** @todo set/clear RF. */
5693 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5694 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5695}
5696
5697
5698/** \#BR - 05. */
5699DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu)
5700{
5701 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5702}
5703
5704
5705/** \#UD - 06. */
5706DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPUCC pVCpu)
5707{
5708 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5709}
5710
5711
5712/** \#NM - 07. */
5713DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu)
5714{
5715 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5716}
5717
5718
5719/** \#TS(err) - 0a. */
5720DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5721{
5722 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5723}
5724
5725
5726/** \#TS(tr) - 0a. */
5727DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu)
5728{
5729 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5730 pVCpu->cpum.GstCtx.tr.Sel, 0);
5731}
5732
5733
5734/** \#TS(0) - 0a. */
5735DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu)
5736{
5737 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5738 0, 0);
5739}
5740
5741
5742/** \#TS(err) - 0a. */
5743DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5744{
5745 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5746 uSel & X86_SEL_MASK_OFF_RPL, 0);
5747}
5748
5749
5750/** \#NP(err) - 0b. */
5751DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5752{
5753 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5754}
5755
5756
5757/** \#NP(sel) - 0b. */
5758DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5759{
5760 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5761 uSel & ~X86_SEL_RPL, 0);
5762}
5763
5764
5765/** \#SS(seg) - 0c. */
5766DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5767{
5768 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5769 uSel & ~X86_SEL_RPL, 0);
5770}
5771
5772
5773/** \#SS(err) - 0c. */
5774DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5775{
5776 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5777}
5778
5779
5780/** \#GP(n) - 0d. */
5781DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr)
5782{
5783 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5784}
5785
5786
5787/** \#GP(0) - 0d. */
5788DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu)
5789{
5790 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5791}
5792
5793#ifdef IEM_WITH_SETJMP
5794/** \#GP(0) - 0d. */
5795DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu)
5796{
5797 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5798}
5799#endif
5800
5801
5802/** \#GP(sel) - 0d. */
5803DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5804{
5805 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5806 Sel & ~X86_SEL_RPL, 0);
5807}
5808
5809
5810/** \#GP(0) - 0d. */
5811DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPUCC pVCpu)
5812{
5813 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5814}
5815
5816
5817/** \#GP(sel) - 0d. */
5818DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5819{
5820 NOREF(iSegReg); NOREF(fAccess);
5821 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5822 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5823}
5824
5825#ifdef IEM_WITH_SETJMP
5826/** \#GP(sel) - 0d, longjmp. */
5827DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5828{
5829 NOREF(iSegReg); NOREF(fAccess);
5830 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5831 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5832}
5833#endif
5834
5835/** \#GP(sel) - 0d. */
5836DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5837{
5838 NOREF(Sel);
5839 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5840}
5841
5842#ifdef IEM_WITH_SETJMP
5843/** \#GP(sel) - 0d, longjmp. */
5844DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel)
5845{
5846 NOREF(Sel);
5847 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5848}
5849#endif
5850
5851
5852/** \#GP(sel) - 0d. */
5853DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5854{
5855 NOREF(iSegReg); NOREF(fAccess);
5856 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5857}
5858
5859#ifdef IEM_WITH_SETJMP
5860/** \#GP(sel) - 0d, longjmp. */
5861DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg,
5862 uint32_t fAccess)
5863{
5864 NOREF(iSegReg); NOREF(fAccess);
5865 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5866}
5867#endif
5868
5869
5870/** \#PF(n) - 0e. */
5871DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5872{
5873 uint16_t uErr;
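    /* Bit 0 (P) of the error code: set for a protection violation on a present translation,
       clear when a page or paging structure was not present. */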
5874 switch (rc)
5875 {
5876 case VERR_PAGE_NOT_PRESENT:
5877 case VERR_PAGE_TABLE_NOT_PRESENT:
5878 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5879 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5880 uErr = 0;
5881 break;
5882
5883 default:
5884 AssertMsgFailed(("%Rrc\n", rc));
5885 RT_FALL_THRU();
5886 case VERR_ACCESS_DENIED:
5887 uErr = X86_TRAP_PF_P;
5888 break;
5889
5890 /** @todo reserved */
5891 }
5892
5893 if (pVCpu->iem.s.uCpl == 3)
5894 uErr |= X86_TRAP_PF_US;
5895
5896 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5897 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5898 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5899 uErr |= X86_TRAP_PF_ID;
5900
5901#if 0 /* This is so much non-sense, really. Why was it done like that? */
5902 /* Note! RW access callers reporting a WRITE protection fault, will clear
5903 the READ flag before calling. So, read-modify-write accesses (RW)
5904 can safely be reported as READ faults. */
5905 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5906 uErr |= X86_TRAP_PF_RW;
5907#else
5908 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5909 {
5910 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
5911 /// (regardless of outcome of the comparison in the latter case).
5912 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
5913 uErr |= X86_TRAP_PF_RW;
5914 }
5915#endif
5916
5917 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5918 uErr, GCPtrWhere);
5919}
5920
5921#ifdef IEM_WITH_SETJMP
5922/** \#PF(n) - 0e, longjmp. */
5923IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5924{
5925 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5926}
5927#endif
5928
5929
5930/** \#MF(0) - 10. */
5931DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPUCC pVCpu)
5932{
5933 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5934}
5935
5936
5937/** \#AC(0) - 11. */
5938DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
5939{
5940 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5941}
5942
5943
5944/**
5945 * Macro for calling iemCImplRaiseDivideError().
5946 *
5947 * This enables us to add/remove arguments and force different levels of
5948 * inlining as we wish.
5949 *
5950 * @return Strict VBox status code.
5951 */
5952#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5953IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5954{
5955 NOREF(cbInstr);
5956 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5957}
5958
5959
5960/**
5961 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5962 *
5963 * This enables us to add/remove arguments and force different levels of
5964 * inlining as we wish.
5965 *
5966 * @return Strict VBox status code.
5967 */
5968#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5969IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5970{
5971 NOREF(cbInstr);
5972 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5973}
5974
5975
5976/**
5977 * Macro for calling iemCImplRaiseInvalidOpcode().
5978 *
5979 * This enables us to add/remove arguments and force different levels of
5980 * inlining as we wish.
5981 *
5982 * @return Strict VBox status code.
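 *
 * Illustrative use from an opcode decoder (the usual pattern, shown here only
 * as an example):
 * @code
 *     return IEMOP_RAISE_INVALID_OPCODE();
 * @endcode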
5983 */
5984#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5985IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5986{
5987 NOREF(cbInstr);
5988 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5989}
5990
5991
5992/** @} */
5993
5994
5995/*
5996 *
5997 * Helper routines.
5998 * Helper routines.
5999 * Helper routines.
6000 *
6001 */
6002
6003/**
6004 * Recalculates the effective operand size.
6005 *
6006 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6007 */
6008IEM_STATIC void iemRecalEffOpSize(PVMCPUCC pVCpu)
6009{
6010 switch (pVCpu->iem.s.enmCpuMode)
6011 {
6012 case IEMMODE_16BIT:
6013 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6014 break;
6015 case IEMMODE_32BIT:
6016 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6017 break;
6018 case IEMMODE_64BIT:
6019 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6020 {
6021 case 0:
6022 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6023 break;
6024 case IEM_OP_PRF_SIZE_OP:
6025 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6026 break;
6027 case IEM_OP_PRF_SIZE_REX_W:
6028 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6029 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6030 break;
6031 }
6032 break;
6033 default:
6034 AssertFailed();
6035 }
6036}
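/*
 * Quick reference for the above (0x66 = operand size prefix):
 *   16-bit mode: default 16-bit, 0x66 selects 32-bit.
 *   32-bit mode: default 32-bit, 0x66 selects 16-bit.
 *   64-bit mode: default is enmDefOpSize, 0x66 selects 16-bit and REX.W
 *                selects 64-bit (REX.W wins when both prefixes are present).
 */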
6037
6038
6039/**
6040 * Sets the default operand size to 64-bit and recalculates the effective
6041 * operand size.
6042 *
6043 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6044 */
6045IEM_STATIC void iemRecalEffOpSize64Default(PVMCPUCC pVCpu)
6046{
6047 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6048 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6049 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6050 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6051 else
6052 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6053}
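/*
 * Note: with the default forced to 64-bit, REX.W is redundant here and the only
 * way to change the effective size is the 0x66 prefix (down to 16-bit); a 32-bit
 * effective operand size is not possible. Typically this applies to instructions
 * such as near branches and push/pop that default to 64-bit in long mode.
 */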
6054
6055
6056/*
6057 *
6058 * Common opcode decoders.
6059 * Common opcode decoders.
6060 * Common opcode decoders.
6061 *
6062 */
6063//#include <iprt/mem.h>
6064
6065/**
6066 * Used to add extra details about a stub case.
6067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6068 */
6069IEM_STATIC void iemOpStubMsg2(PVMCPUCC pVCpu)
6070{
6071#if defined(LOG_ENABLED) && defined(IN_RING3)
6072 PVM pVM = pVCpu->CTX_SUFF(pVM);
6073 char szRegs[4096];
6074 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6075 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6076 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6077 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6078 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6079 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6080 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6081 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6082 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6083 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6084 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6085 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6086 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6087 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6088 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6089 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6090 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6091 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6092 " efer=%016VR{efer}\n"
6093 " pat=%016VR{pat}\n"
6094 " sf_mask=%016VR{sf_mask}\n"
6095 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6096 " lstar=%016VR{lstar}\n"
6097 " star=%016VR{star} cstar=%016VR{cstar}\n"
6098 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6099 );
6100
6101 char szInstr[256];
6102 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6103 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6104 szInstr, sizeof(szInstr), NULL);
6105
6106 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6107#else
6108 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip);
6109#endif
6110}
6111
6112/**
6113 * Complains about a stub.
6114 *
6115 * Providing two versions of this macro, one for daily use and one for use when
6116 * working on IEM.
6117 */
6118#if 0
6119# define IEMOP_BITCH_ABOUT_STUB() \
6120 do { \
6121 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6122 iemOpStubMsg2(pVCpu); \
6123 RTAssertPanic(); \
6124 } while (0)
6125#else
6126# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6127#endif
6128
6129/** Stubs an opcode. */
6130#define FNIEMOP_STUB(a_Name) \
6131 FNIEMOP_DEF(a_Name) \
6132 { \
6133 RT_NOREF_PV(pVCpu); \
6134 IEMOP_BITCH_ABOUT_STUB(); \
6135 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6136 } \
6137 typedef int ignore_semicolon
6138
6139/** Stubs an opcode. */
6140#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6141 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6142 { \
6143 RT_NOREF_PV(pVCpu); \
6144 RT_NOREF_PV(a_Name0); \
6145 IEMOP_BITCH_ABOUT_STUB(); \
6146 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6147 } \
6148 typedef int ignore_semicolon
6149
6150/** Stubs an opcode which currently should raise \#UD. */
6151#define FNIEMOP_UD_STUB(a_Name) \
6152 FNIEMOP_DEF(a_Name) \
6153 { \
6154 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6155 return IEMOP_RAISE_INVALID_OPCODE(); \
6156 } \
6157 typedef int ignore_semicolon
6158
6159/** Stubs an opcode which currently should raise \#UD. */
6160#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6161 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6162 { \
6163 RT_NOREF_PV(pVCpu); \
6164 RT_NOREF_PV(a_Name0); \
6165 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6166 return IEMOP_RAISE_INVALID_OPCODE(); \
6167 } \
6168 typedef int ignore_semicolon
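/*
 * Illustration only (the opcode names below are made up, not decoders from this file):
 *
 *     FNIEMOP_STUB(iemOp_SomeUnimplementedOpcode);   - logs and returns
 *                                                      VERR_IEM_INSTR_NOT_IMPLEMENTED.
 *     FNIEMOP_UD_STUB(iemOp_SomeReservedEncoding);   - raises \#UD via
 *                                                      IEMOP_RAISE_INVALID_OPCODE().
 */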
6169
6170
6171
6172/** @name Register Access.
6173 * @{
6174 */
6175
6176/**
6177 * Gets a reference (pointer) to the specified hidden segment register.
6178 *
6179 * @returns Hidden register reference.
6180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6181 * @param iSegReg The segment register.
6182 */
6183IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg)
6184{
6185 Assert(iSegReg < X86_SREG_COUNT);
6186 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6187 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6188
6189 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6190 return pSReg;
6191}
6192
6193
6194/**
6195 * Ensures that the given hidden segment register is up to date.
6196 *
6197 * @returns Hidden register reference.
6198 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6199 * @param pSReg The segment register.
6200 */
6201IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
6202{
6203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6204 NOREF(pVCpu);
6205 return pSReg;
6206}
6207
6208
6209/**
6210 * Gets a reference (pointer) to the specified segment register (the selector
6211 * value).
6212 *
6213 * @returns Pointer to the selector variable.
6214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6215 * @param iSegReg The segment register.
6216 */
6217DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg)
6218{
6219 Assert(iSegReg < X86_SREG_COUNT);
6220 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6221 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6222}
6223
6224
6225/**
6226 * Fetches the selector value of a segment register.
6227 *
6228 * @returns The selector value.
6229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6230 * @param iSegReg The segment register.
6231 */
6232DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg)
6233{
6234 Assert(iSegReg < X86_SREG_COUNT);
6235 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6236 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6237}
6238
6239
6240/**
6241 * Fetches the base address value of a segment register.
6242 *
6243 * @returns The segment base address value.
6244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6245 * @param iSegReg The segment register.
6246 */
6247DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6248{
6249 Assert(iSegReg < X86_SREG_COUNT);
6250 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6251 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6252}
6253
6254
6255/**
6256 * Gets a reference (pointer) to the specified general purpose register.
6257 *
6258 * @returns Register reference.
6259 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6260 * @param iReg The general purpose register.
6261 */
6262DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg)
6263{
6264 Assert(iReg < 16);
6265 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6266}
6267
6268
6269/**
6270 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6271 *
6272 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6273 *
6274 * @returns Register reference.
6275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6276 * @param iReg The register.
6277 */
6278DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg)
6279{
6280 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6281 {
6282 Assert(iReg < 16);
6283 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6284 }
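    /* Without a REX prefix, byte-register encodings 4-7 select the legacy high
       registers AH, CH, DH and BH, i.e. bits 8-15 of rAX/rCX/rDX/rBX, which is
       what the iReg & 3 plus bHi access below implements. */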
6285 /* high 8-bit register. */
6286 Assert(iReg < 8);
6287 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6288}
6289
6290
6291/**
6292 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6293 *
6294 * @returns Register reference.
6295 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6296 * @param iReg The register.
6297 */
6298DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg)
6299{
6300 Assert(iReg < 16);
6301 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6302}
6303
6304
6305/**
6306 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6307 *
6308 * @returns Register reference.
6309 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6310 * @param iReg The register.
6311 */
6312DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg)
6313{
6314 Assert(iReg < 16);
6315 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6316}
6317
6318
6319/**
6320 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6321 *
6322 * @returns Register reference.
6323 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6324 * @param iReg The register.
6325 */
6326DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg)
6327{
6328 Assert(iReg < 16);
6329 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6330}
6331
6332
6333/**
6334 * Gets a reference (pointer) to the specified segment register's base address.
6335 *
6336 * @returns Segment register base address reference.
6337 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6338 * @param iSegReg The segment selector.
6339 */
6340DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6341{
6342 Assert(iSegReg < X86_SREG_COUNT);
6343 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6344 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6345}
6346
6347
6348/**
6349 * Fetches the value of an 8-bit general purpose register.
6350 *
6351 * @returns The register value.
6352 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6353 * @param iReg The register.
6354 */
6355DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg)
6356{
6357 return *iemGRegRefU8(pVCpu, iReg);
6358}
6359
6360
6361/**
6362 * Fetches the value of a 16-bit general purpose register.
6363 *
6364 * @returns The register value.
6365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6366 * @param iReg The register.
6367 */
6368DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg)
6369{
6370 Assert(iReg < 16);
6371 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6372}
6373
6374
6375/**
6376 * Fetches the value of a 32-bit general purpose register.
6377 *
6378 * @returns The register value.
6379 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6380 * @param iReg The register.
6381 */
6382DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg)
6383{
6384 Assert(iReg < 16);
6385 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6386}
6387
6388
6389/**
6390 * Fetches the value of a 64-bit general purpose register.
6391 *
6392 * @returns The register value.
6393 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6394 * @param iReg The register.
6395 */
6396DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg)
6397{
6398 Assert(iReg < 16);
6399 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6400}
6401
6402
6403/**
6404 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6405 *
6406 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6407 * segment limit.
6408 *
6409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6410 * @param offNextInstr The offset of the next instruction.
6411 */
6412IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr)
6413{
6414 switch (pVCpu->iem.s.enmEffOpSize)
6415 {
6416 case IEMMODE_16BIT:
6417 {
6418 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6419 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6420 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6421 return iemRaiseGeneralProtectionFault0(pVCpu);
6422 pVCpu->cpum.GstCtx.rip = uNewIp;
6423 break;
6424 }
6425
6426 case IEMMODE_32BIT:
6427 {
6428 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6429 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6430
6431 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6432 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6433 return iemRaiseGeneralProtectionFault0(pVCpu);
6434 pVCpu->cpum.GstCtx.rip = uNewEip;
6435 break;
6436 }
6437
6438 case IEMMODE_64BIT:
6439 {
6440 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6441
6442 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6443 if (!IEM_IS_CANONICAL(uNewRip))
6444 return iemRaiseGeneralProtectionFault0(pVCpu);
6445 pVCpu->cpum.GstCtx.rip = uNewRip;
6446 break;
6447 }
6448
6449 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6450 }
6451
6452 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6453
6454#ifndef IEM_WITH_CODE_TLB
6455 /* Flush the prefetch buffer. */
6456 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6457#endif
6458
6459 return VINF_SUCCESS;
6460}
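/*
 * Example: in 16-bit code a two byte 'jmp $' (EB FE) at IP=0x1234 computes
 * uNewIp = 0x1234 + (-2) + 2 = 0x1234; doing the arithmetic in a uint16_t also
 * gives the architectural IP wrap-around for free.
 */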
6461
6462
6463/**
6464 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6465 *
6466 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6467 * segment limit.
6468 *
6469 * @returns Strict VBox status code.
6470 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6471 * @param offNextInstr The offset of the next instruction.
6472 */
6473IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr)
6474{
6475 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6476
6477 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6478 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6479 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6480 return iemRaiseGeneralProtectionFault0(pVCpu);
6481 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6482 pVCpu->cpum.GstCtx.rip = uNewIp;
6483 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6484
6485#ifndef IEM_WITH_CODE_TLB
6486 /* Flush the prefetch buffer. */
6487 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6488#endif
6489
6490 return VINF_SUCCESS;
6491}
6492
6493
6494/**
6495 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6496 *
6497 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6498 * segment limit.
6499 *
6500 * @returns Strict VBox status code.
6501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6502 * @param offNextInstr The offset of the next instruction.
6503 */
6504IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr)
6505{
6506 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6507
6508 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6509 {
6510 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6511
6512 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6513 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6514 return iemRaiseGeneralProtectionFault0(pVCpu);
6515 pVCpu->cpum.GstCtx.rip = uNewEip;
6516 }
6517 else
6518 {
6519 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6520
6521 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6522 if (!IEM_IS_CANONICAL(uNewRip))
6523 return iemRaiseGeneralProtectionFault0(pVCpu);
6524 pVCpu->cpum.GstCtx.rip = uNewRip;
6525 }
6526 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6527
6528#ifndef IEM_WITH_CODE_TLB
6529 /* Flush the prefetch buffer. */
6530 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6531#endif
6532
6533 return VINF_SUCCESS;
6534}
6535
6536
6537/**
6538 * Performs a near jump to the specified address.
6539 *
6540 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6541 * segment limit.
6542 *
6543 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6544 * @param uNewRip The new RIP value.
6545 */
6546IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip)
6547{
6548 switch (pVCpu->iem.s.enmEffOpSize)
6549 {
6550 case IEMMODE_16BIT:
6551 {
6552 Assert(uNewRip <= UINT16_MAX);
6553 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6554 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6555 return iemRaiseGeneralProtectionFault0(pVCpu);
6556 /** @todo Test 16-bit jump in 64-bit mode. */
6557 pVCpu->cpum.GstCtx.rip = uNewRip;
6558 break;
6559 }
6560
6561 case IEMMODE_32BIT:
6562 {
6563 Assert(uNewRip <= UINT32_MAX);
6564 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6565 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6566
6567 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6568 return iemRaiseGeneralProtectionFault0(pVCpu);
6569 pVCpu->cpum.GstCtx.rip = uNewRip;
6570 break;
6571 }
6572
6573 case IEMMODE_64BIT:
6574 {
6575 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6576
6577 if (!IEM_IS_CANONICAL(uNewRip))
6578 return iemRaiseGeneralProtectionFault0(pVCpu);
6579 pVCpu->cpum.GstCtx.rip = uNewRip;
6580 break;
6581 }
6582
6583 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6584 }
6585
6586 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6587
6588#ifndef IEM_WITH_CODE_TLB
6589 /* Flush the prefetch buffer. */
6590 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6591#endif
6592
6593 return VINF_SUCCESS;
6594}
6595
6596
6597/**
6598 * Gets the address of the top of the stack.
6599 *
6600 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6601 */
6602DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6603{
6604 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6605 return pVCpu->cpum.GstCtx.rsp;
6606 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6607 return pVCpu->cpum.GstCtx.esp;
6608 return pVCpu->cpum.GstCtx.sp;
6609}
6610
6611
6612/**
6613 * Updates the RIP/EIP/IP to point to the next instruction.
6614 *
6615 * This function leaves the EFLAGS.RF flag alone.
6616 *
6617 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6618 * @param cbInstr The number of bytes to add.
6619 */
6620IEM_STATIC void iemRegAddToRipKeepRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6621{
6622 switch (pVCpu->iem.s.enmCpuMode)
6623 {
6624 case IEMMODE_16BIT:
6625 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6626 pVCpu->cpum.GstCtx.eip += cbInstr;
6627 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6628 break;
6629
6630 case IEMMODE_32BIT:
6631 pVCpu->cpum.GstCtx.eip += cbInstr;
6632 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6633 break;
6634
6635 case IEMMODE_64BIT:
6636 pVCpu->cpum.GstCtx.rip += cbInstr;
6637 break;
6638 default: AssertFailed();
6639 }
6640}
6641
6642
6643#if 0
6644/**
6645 * Updates the RIP/EIP/IP to point to the next instruction.
6646 *
6647 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6648 */
6649IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPUCC pVCpu)
6650{
6651 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6652}
6653#endif
6654
6655
6656
6657/**
6658 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6659 *
6660 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6661 * @param cbInstr The number of bytes to add.
6662 */
6663IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6664{
6665 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6666
6667 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
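    /* The AssertCompile above lets IEMMODE be used directly as a table index in
       the 64-bit host path below; the masks keep RIP within 32 bits for 16-bit
       and 32-bit code and leave the full 64-bit value alone in long mode. */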
6668#if ARCH_BITS >= 64
6669 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6670 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6671 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6672#else
6673 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6674 pVCpu->cpum.GstCtx.rip += cbInstr;
6675 else
6676 pVCpu->cpum.GstCtx.eip += cbInstr;
6677#endif
6678}
6679
6680
6681/**
6682 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6683 *
6684 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6685 */
6686IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPUCC pVCpu)
6687{
6688 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6689}
6690
6691
6692/**
6693 * Adds to the stack pointer.
6694 *
6695 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6696 * @param cbToAdd The number of bytes to add (8-bit!).
6697 */
6698DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd)
6699{
6700 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6701 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6702 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6703 pVCpu->cpum.GstCtx.esp += cbToAdd;
6704 else
6705 pVCpu->cpum.GstCtx.sp += cbToAdd;
6706}
6707
6708
6709/**
6710 * Subtracts from the stack pointer.
6711 *
6712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6713 * @param cbToSub The number of bytes to subtract (8-bit!).
6714 */
6715DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub)
6716{
6717 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6718 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6719 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6720 pVCpu->cpum.GstCtx.esp -= cbToSub;
6721 else
6722 pVCpu->cpum.GstCtx.sp -= cbToSub;
6723}
6724
6725
6726/**
6727 * Adds to the temporary stack pointer.
6728 *
6729 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6730 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6731 * @param cbToAdd The number of bytes to add (16-bit).
6732 */
6733DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6734{
6735 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6736 pTmpRsp->u += cbToAdd;
6737 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6738 pTmpRsp->DWords.dw0 += cbToAdd;
6739 else
6740 pTmpRsp->Words.w0 += cbToAdd;
6741}
6742
6743
6744/**
6745 * Subtracts from the temporary stack pointer.
6746 *
6747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6748 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6749 * @param cbToSub The number of bytes to subtract.
6750 * @remarks The @a cbToSub argument *MUST* be 16-bit; iemCImpl_enter is
6751 * expecting that.
6752 */
6753DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6754{
6755 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6756 pTmpRsp->u -= cbToSub;
6757 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6758 pTmpRsp->DWords.dw0 -= cbToSub;
6759 else
6760 pTmpRsp->Words.w0 -= cbToSub;
6761}
6762
6763
6764/**
6765 * Calculates the effective stack address for a push of the specified size as
6766 * well as the new RSP value (upper bits may be masked).
6767 *
6768 * @returns Effective stack address for the push.
6769 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6770 * @param cbItem The size of the stack item to push.
6771 * @param puNewRsp Where to return the new RSP value.
6772 */
6773DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6774{
6775 RTUINT64U uTmpRsp;
6776 RTGCPTR GCPtrTop;
6777 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6778
6779 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6780 GCPtrTop = uTmpRsp.u -= cbItem;
6781 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6782 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6783 else
6784 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6785 *puNewRsp = uTmpRsp.u;
6786 return GCPtrTop;
6787}
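/*
 * Illustration: a 4 byte push on a 32-bit stack (SS.D=1) with ESP=0x1000 and the
 * upper RSP bits zero returns GCPtrTop = 0xffc and *puNewRsp = 0xffc; on a
 * 16-bit stack only the low word of RSP is decremented (and wraps).
 */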
6788
6789
6790/**
6791 * Gets the current stack pointer and calculates the value after a pop of the
6792 * specified size.
6793 *
6794 * @returns Current stack pointer.
6795 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6796 * @param cbItem The size of the stack item to pop.
6797 * @param puNewRsp Where to return the new RSP value.
6798 */
6799DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6800{
6801 RTUINT64U uTmpRsp;
6802 RTGCPTR GCPtrTop;
6803 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6804
6805 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6806 {
6807 GCPtrTop = uTmpRsp.u;
6808 uTmpRsp.u += cbItem;
6809 }
6810 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6811 {
6812 GCPtrTop = uTmpRsp.DWords.dw0;
6813 uTmpRsp.DWords.dw0 += cbItem;
6814 }
6815 else
6816 {
6817 GCPtrTop = uTmpRsp.Words.w0;
6818 uTmpRsp.Words.w0 += cbItem;
6819 }
6820 *puNewRsp = uTmpRsp.u;
6821 return GCPtrTop;
6822}
6823
6824
6825/**
6826 * Calculates the effective stack address for a push of the specified size as
6827 * well as the new temporary RSP value (upper bits may be masked).
6828 *
6829 * @returns Effective stack address for the push.
6830 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6831 * @param pTmpRsp The temporary stack pointer. This is updated.
6832 * @param cbItem The size of the stack item to push.
6833 */
6834DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6835{
6836 RTGCPTR GCPtrTop;
6837
6838 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6839 GCPtrTop = pTmpRsp->u -= cbItem;
6840 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6841 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6842 else
6843 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6844 return GCPtrTop;
6845}
6846
6847
6848/**
6849 * Gets the effective stack address for a pop of the specified size and
6850 * calculates and updates the temporary RSP.
6851 *
6852 * @returns Current stack pointer.
6853 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6854 * @param pTmpRsp The temporary stack pointer. This is updated.
6855 * @param cbItem The size of the stack item to pop.
6856 */
6857DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6858{
6859 RTGCPTR GCPtrTop;
6860 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6861 {
6862 GCPtrTop = pTmpRsp->u;
6863 pTmpRsp->u += cbItem;
6864 }
6865 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6866 {
6867 GCPtrTop = pTmpRsp->DWords.dw0;
6868 pTmpRsp->DWords.dw0 += cbItem;
6869 }
6870 else
6871 {
6872 GCPtrTop = pTmpRsp->Words.w0;
6873 pTmpRsp->Words.w0 += cbItem;
6874 }
6875 return GCPtrTop;
6876}
6877
6878/** @} */
6879
6880
6881/** @name FPU access and helpers.
6882 *
6883 * @{
6884 */
6885
6886
6887/**
6888 * Hook for preparing to use the host FPU.
6889 *
6890 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6891 *
6892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6893 */
6894DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu)
6895{
6896#ifdef IN_RING3
6897 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6898#else
6899 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6900#endif
6901 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6902}
6903
6904
6905/**
6906 * Hook for preparing to use the host FPU for SSE.
6907 *
6908 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6909 *
6910 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6911 */
6912DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu)
6913{
6914 iemFpuPrepareUsage(pVCpu);
6915}
6916
6917
6918/**
6919 * Hook for preparing to use the host FPU for AVX.
6920 *
6921 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6922 *
6923 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6924 */
6925DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu)
6926{
6927 iemFpuPrepareUsage(pVCpu);
6928}
6929
6930
6931/**
6932 * Hook for actualizing the guest FPU state before the interpreter reads it.
6933 *
6934 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6935 *
6936 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6937 */
6938DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu)
6939{
6940#ifdef IN_RING3
6941 NOREF(pVCpu);
6942#else
6943 CPUMRZFpuStateActualizeForRead(pVCpu);
6944#endif
6945 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6946}
6947
6948
6949/**
6950 * Hook for actualizing the guest FPU state before the interpreter changes it.
6951 *
6952 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6953 *
6954 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6955 */
6956DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu)
6957{
6958#ifdef IN_RING3
6959 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6960#else
6961 CPUMRZFpuStateActualizeForChange(pVCpu);
6962#endif
6963 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6964}
6965
6966
6967/**
6968 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6969 * only.
6970 *
6971 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6972 *
6973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6974 */
6975DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu)
6976{
6977#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6978 NOREF(pVCpu);
6979#else
6980 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6981#endif
6982 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6983}
6984
6985
6986/**
6987 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6988 * read+write.
6989 *
6990 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6991 *
6992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6993 */
6994DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu)
6995{
6996#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6997 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6998#else
6999 CPUMRZFpuStateActualizeForChange(pVCpu);
7000#endif
7001 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7002
7003 /* Make sure any changes are loaded the next time around. */
7004 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->Hdr.bmXState |= XSAVE_C_SSE;
7005}
7006
7007
7008/**
7009 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7010 * only.
7011 *
7012 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7013 *
7014 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7015 */
7016DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu)
7017{
7018#ifdef IN_RING3
7019 NOREF(pVCpu);
7020#else
7021 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7022#endif
7023 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7024}
7025
7026
7027/**
7028 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7029 * read+write.
7030 *
7031 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7032 *
7033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7034 */
7035DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu)
7036{
7037#ifdef IN_RING3
7038 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7039#else
7040 CPUMRZFpuStateActualizeForChange(pVCpu);
7041#endif
7042 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7043
7044 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
7045 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
7046}
7047
7048
7049/**
7050 * Stores a QNaN value into an FPU register.
7051 *
7052 * @param pReg Pointer to the register.
7053 */
7054DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7055{
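    /* This is the 80-bit 'real indefinite' QNaN (sign=1, exponent=0x7fff,
       mantissa=0xC000000000000000) that masked x87 exceptions produce. */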
7056 pReg->au32[0] = UINT32_C(0x00000000);
7057 pReg->au32[1] = UINT32_C(0xc0000000);
7058 pReg->au16[4] = UINT16_C(0xffff);
7059}
7060
7061
7062/**
7063 * Updates the FOP, FPUCS, and FPUIP registers.
7064 *
7065 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7066 * @param pFpuCtx The FPU context.
7067 */
7068DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx)
7069{
7070 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7071 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7072 /** @todo x87.CS and FPUIP need to be kept separately. */
7073 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7074 {
7075 /** @todo Testcase: we are making assumptions here about how FPUIP and FPUDP are
7076 * handled in real mode, based on the fnsave and fnstenv images. */
7077 pFpuCtx->CS = 0;
7078 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7079 }
7080 else
7081 {
7082 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7083 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7084 }
7085}
7086
7087
7088/**
7089 * Updates the x87.DS and FPUDP registers.
7090 *
7091 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7092 * @param pFpuCtx The FPU context.
7093 * @param iEffSeg The effective segment register.
7094 * @param GCPtrEff The effective address relative to @a iEffSeg.
7095 */
7096DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7097{
7098 RTSEL sel;
7099 switch (iEffSeg)
7100 {
7101 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7102 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7103 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7104 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7105 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7106 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7107 default:
7108 AssertMsgFailed(("%d\n", iEffSeg));
7109 sel = pVCpu->cpum.GstCtx.ds.Sel;
7110 }
7111 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7112 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7113 {
7114 pFpuCtx->DS = 0;
7115 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7116 }
7117 else
7118 {
7119 pFpuCtx->DS = sel;
7120 pFpuCtx->FPUDP = GCPtrEff;
7121 }
7122}
7123
7124
7125/**
7126 * Rotates the stack registers in the push direction.
7127 *
7128 * @param pFpuCtx The FPU context.
7129 * @remarks This is a complete waste of time, but fxsave stores the registers in
7130 * stack order.
7131 */
7132DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7133{
7134 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7135 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7136 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7137 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7138 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7139 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7140 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7141 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7142 pFpuCtx->aRegs[0].r80 = r80Tmp;
7143}
7144
7145
7146/**
7147 * Rotates the stack registers in the pop direction.
7148 *
7149 * @param pFpuCtx The FPU context.
7150 * @remarks This is a complete waste of time, but fxsave stores the registers in
7151 * stack order.
7152 */
7153DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7154{
7155 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7156 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7157 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7158 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7159 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7160 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7161 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7162 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7163 pFpuCtx->aRegs[7].r80 = r80Tmp;
7164}
7165
7166
7167/**
7168 * Updates FSW and pushes an FPU result onto the FPU stack if no pending
7169 * exception prevents it.
7170 *
7171 * @param pResult The FPU operation result to push.
7172 * @param pFpuCtx The FPU context.
7173 */
7174IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7175{
7176 /* Update FSW and bail if there are pending exceptions afterwards. */
7177 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7178 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
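    /* The IE/DE/ZE status bits (FSW bits 0-2) line up with the IM/DM/ZM mask
       bits in FCW, so only unmasked exceptions survive the AND-NOT below. */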
7179 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7180 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7181 {
7182 pFpuCtx->FSW = fFsw;
7183 return;
7184 }
7185
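    /* TOP is a 3-bit field, so adding 7 decrements it modulo 8 (the push direction). */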
7186 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7187 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7188 {
7189 /* All is fine, push the actual value. */
7190 pFpuCtx->FTW |= RT_BIT(iNewTop);
7191 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7192 }
7193 else if (pFpuCtx->FCW & X86_FCW_IM)
7194 {
7195 /* Masked stack overflow, push QNaN. */
7196 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7197 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7198 }
7199 else
7200 {
7201 /* Raise stack overflow, don't push anything. */
7202 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7203 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7204 return;
7205 }
7206
7207 fFsw &= ~X86_FSW_TOP_MASK;
7208 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7209 pFpuCtx->FSW = fFsw;
7210
7211 iemFpuRotateStackPush(pFpuCtx);
7212}
7213
7214
7215/**
7216 * Stores a result in an FPU register and updates the FSW and FTW.
7217 *
7218 * @param pFpuCtx The FPU context.
7219 * @param pResult The result to store.
7220 * @param iStReg Which FPU register to store it in.
7221 */
7222IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7223{
7224 Assert(iStReg < 8);
7225 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7226 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7227 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7228 pFpuCtx->FTW |= RT_BIT(iReg);
7229 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7230}
7231
7232
7233/**
7234 * Only updates the FPU status word (FSW) with the result of the current
7235 * instruction.
7236 *
7237 * @param pFpuCtx The FPU context.
7238 * @param u16FSW The FSW output of the current instruction.
7239 */
7240IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7241{
7242 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7243 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7244}
7245
7246
7247/**
7248 * Pops one item off the FPU stack if no pending exception prevents it.
7249 *
7250 * @param pFpuCtx The FPU context.
7251 */
7252IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7253{
7254 /* Check pending exceptions. */
7255 uint16_t uFSW = pFpuCtx->FSW;
7256 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7257 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7258 return;
7259
7260 /* TOP++ (pop direction); adding 9 below is +1 modulo the 3-bit TOP field. */
7261 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7262 uFSW &= ~X86_FSW_TOP_MASK;
7263 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7264 pFpuCtx->FSW = uFSW;
7265
7266 /* Mark the previous ST0 as empty. */
7267 iOldTop >>= X86_FSW_TOP_SHIFT;
7268 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7269
7270 /* Rotate the registers. */
7271 iemFpuRotateStackPop(pFpuCtx);
7272}
7273
7274
7275/**
7276 * Pushes an FPU result onto the FPU stack if no pending exception prevents it.
7277 *
7278 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7279 * @param pResult The FPU operation result to push.
7280 */
7281IEM_STATIC void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult)
7282{
7283 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7284 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7285 iemFpuMaybePushResult(pResult, pFpuCtx);
7286}
7287
7288
7289/**
7290 * Pushes an FPU result onto the FPU stack if no pending exception prevents it,
7291 * and sets FPUDP and FPUDS.
7292 *
7293 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7294 * @param pResult The FPU operation result to push.
7295 * @param iEffSeg The effective segment register.
7296 * @param GCPtrEff The effective address relative to @a iEffSeg.
7297 */
7298IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7299{
7300 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7301 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7302 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7303 iemFpuMaybePushResult(pResult, pFpuCtx);
7304}
7305
7306
7307/**
7308 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7309 * unless a pending exception prevents it.
7310 *
7311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7312 * @param pResult The FPU operation result to store and push.
7313 */
7314IEM_STATIC void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult)
7315{
7316 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7317 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7318
7319 /* Update FSW and bail if there are pending exceptions afterwards. */
7320 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7321 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7322 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7323 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7324 {
7325 pFpuCtx->FSW = fFsw;
7326 return;
7327 }
7328
7329 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7330 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7331 {
7332 /* All is fine, push the actual value. */
7333 pFpuCtx->FTW |= RT_BIT(iNewTop);
7334 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7335 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7336 }
7337 else if (pFpuCtx->FCW & X86_FCW_IM)
7338 {
7339 /* Masked stack overflow, push QNaN. */
7340 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7341 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7342 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7343 }
7344 else
7345 {
7346 /* Raise stack overflow, don't push anything. */
7347 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7348 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7349 return;
7350 }
7351
7352 fFsw &= ~X86_FSW_TOP_MASK;
7353 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7354 pFpuCtx->FSW = fFsw;
7355
7356 iemFpuRotateStackPush(pFpuCtx);
7357}
7358
7359
7360/**
7361 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7362 * FOP.
7363 *
7364 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7365 * @param pResult The result to store.
7366 * @param iStReg Which FPU register to store it in.
7367 */
7368IEM_STATIC void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7369{
7370 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7371 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7372 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7373}
7374
7375
7376/**
7377 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7378 * FOP, and then pops the stack.
7379 *
7380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7381 * @param pResult The result to store.
7382 * @param iStReg Which FPU register to store it in.
7383 */
7384IEM_STATIC void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7385{
7386 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7387 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7388 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7389 iemFpuMaybePopOne(pFpuCtx);
7390}
7391
7392
7393/**
7394 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7395 * FPUDP, and FPUDS.
7396 *
7397 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7398 * @param pResult The result to store.
7399 * @param iStReg Which FPU register to store it in.
7400 * @param iEffSeg The effective memory operand selector register.
7401 * @param GCPtrEff The effective memory operand offset.
7402 */
7403IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7404 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7405{
7406 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7407 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7408 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7409 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7410}
7411
7412
7413/**
7414 * Stores a result in an FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7415 * FPUDP, and FPUDS, and then pops the stack.
7416 *
7417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7418 * @param pResult The result to store.
7419 * @param iStReg Which FPU register to store it in.
7420 * @param iEffSeg The effective memory operand selector register.
7421 * @param GCPtrEff The effective memory operand offset.
7422 */
7423IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
7424 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7425{
7426 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7427 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7428 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7429 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7430 iemFpuMaybePopOne(pFpuCtx);
7431}
7432
7433
7434/**
7435 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7436 *
7437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7438 */
7439IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu)
7440{
7441 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7442 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7443}
7444
7445
7446/**
7447 * Marks the specified stack register as free (for FFREE).
7448 *
7449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7450 * @param iStReg The register to free.
7451 */
7452IEM_STATIC void iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg)
7453{
7454 Assert(iStReg < 8);
7455 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7456 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7457 pFpuCtx->FTW &= ~RT_BIT(iReg);
7458}
7459
7460
7461/**
7462 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7463 *
7464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7465 */
7466IEM_STATIC void iemFpuStackIncTop(PVMCPUCC pVCpu)
7467{
7468 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7469 uint16_t uFsw = pFpuCtx->FSW;
7470 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7471 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7472 uFsw &= ~X86_FSW_TOP_MASK;
7473 uFsw |= uTop;
7474 pFpuCtx->FSW = uFsw;
7475}
7476
7477
7478/**
7479 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7480 *
7481 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7482 */
7483IEM_STATIC void iemFpuStackDecTop(PVMCPUCC pVCpu)
7484{
7485 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7486 uint16_t uFsw = pFpuCtx->FSW;
7487 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7488 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7489 uFsw &= ~X86_FSW_TOP_MASK;
7490 uFsw |= uTop;
7491 pFpuCtx->FSW = uFsw;
7492}
7493
7494
7495/**
7496 * Updates the FSW, FOP, FPUIP, and FPUCS.
7497 *
7498 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7499 * @param u16FSW The FSW from the current instruction.
7500 */
7501IEM_STATIC void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW)
7502{
7503 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7504 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7505 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7506}
7507
7508
7509/**
7510 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7511 *
7512 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7513 * @param u16FSW The FSW from the current instruction.
7514 */
7515IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7516{
7517 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7518 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7519 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7520 iemFpuMaybePopOne(pFpuCtx);
7521}
7522
7523
7524/**
7525 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7526 *
7527 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7528 * @param u16FSW The FSW from the current instruction.
7529 * @param iEffSeg The effective memory operand selector register.
7530 * @param GCPtrEff The effective memory operand offset.
7531 */
7532IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7533{
7534 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7535 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7536 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7537 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7538}
7539
7540
7541/**
7542 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7543 *
7544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7545 * @param u16FSW The FSW from the current instruction.
7546 */
7547IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7548{
7549 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7550 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7551 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7552 iemFpuMaybePopOne(pFpuCtx);
7553 iemFpuMaybePopOne(pFpuCtx);
7554}
7555
7556
7557/**
7558 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7559 *
7560 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7561 * @param u16FSW The FSW from the current instruction.
7562 * @param iEffSeg The effective memory operand selector register.
7563 * @param GCPtrEff The effective memory operand offset.
7564 */
7565IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7566{
7567 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7568 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7569 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7570 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7571 iemFpuMaybePopOne(pFpuCtx);
7572}
7573
7574
7575/**
7576 * Worker routine for raising an FPU stack underflow exception.
7577 *
7578 * @param pFpuCtx The FPU context.
7579 * @param iStReg The stack register being accessed.
7580 */
7581IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7582{
7583 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7584 if (pFpuCtx->FCW & X86_FCW_IM)
7585 {
7586 /* Masked underflow. */
7587 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7588 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7589 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7590 if (iStReg != UINT8_MAX)
7591 {
7592 pFpuCtx->FTW |= RT_BIT(iReg);
7593 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7594 }
7595 }
7596 else
7597 {
7598 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7599 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7600 }
7601}
7602
7603
7604/**
7605 * Raises an FPU stack underflow exception.
7606 *
7607 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7608 * @param iStReg The destination register that should be loaded
7609 * with QNaN if \#IS is not masked. Specify
7610 * UINT8_MAX if none (like for fcom).
7611 */
7612DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg)
7613{
7614 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7615 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7616 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7617}
7618
7619
7620DECL_NO_INLINE(IEM_STATIC, void)
7621iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7622{
7623 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7624 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7625 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7626 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7627}
7628
7629
7630DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg)
7631{
7632 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7633 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7634 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7635 iemFpuMaybePopOne(pFpuCtx);
7636}
7637
7638
7639DECL_NO_INLINE(IEM_STATIC, void)
7640iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7641{
7642 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7643 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7644 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7645 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7646 iemFpuMaybePopOne(pFpuCtx);
7647}
7648
7649
7650DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu)
7651{
7652 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7653 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7654 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7655 iemFpuMaybePopOne(pFpuCtx);
7656 iemFpuMaybePopOne(pFpuCtx);
7657}
7658
7659
7660DECL_NO_INLINE(IEM_STATIC, void)
7661iemFpuStackPushUnderflow(PVMCPUCC pVCpu)
7662{
7663 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7664 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7665
7666 if (pFpuCtx->FCW & X86_FCW_IM)
7667 {
7668 /* Masked stack underflow - push QNaN. */
7669 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7670 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7671 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7672 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7673 pFpuCtx->FTW |= RT_BIT(iNewTop);
7674 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7675 iemFpuRotateStackPush(pFpuCtx);
7676 }
7677 else
7678 {
7679 /* Exception pending - don't change TOP or the register stack. */
7680 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7681 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7682 }
7683}
7684
7685
7686DECL_NO_INLINE(IEM_STATIC, void)
7687iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu)
7688{
7689 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7690 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7691
7692 if (pFpuCtx->FCW & X86_FCW_IM)
7693 {
7694 /* Masked stack underflow - push QNaN. */
7695 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7696 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7697 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7698 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7699 pFpuCtx->FTW |= RT_BIT(iNewTop);
7700 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7701 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7702 iemFpuRotateStackPush(pFpuCtx);
7703 }
7704 else
7705 {
7706 /* Exception pending - don't change TOP or the register stack. */
7707 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7708 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7709 }
7710}
7711
7712
7713/**
7714 * Worker routine for raising an FPU stack overflow exception on a push.
7715 *
7716 * @param pFpuCtx The FPU context.
7717 */
7718IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7719{
7720 if (pFpuCtx->FCW & X86_FCW_IM)
7721 {
7722 /* Masked overflow. */
7723 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7724 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7725 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7726 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7727 pFpuCtx->FTW |= RT_BIT(iNewTop);
7728 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7729 iemFpuRotateStackPush(pFpuCtx);
7730 }
7731 else
7732 {
7733 /* Exception pending - don't change TOP or the register stack. */
7734 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7735 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7736 }
7737}
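
/* Illustrative note, not from the original source: on the masked paths the push
 * workers encode which stack fault occurred through C1, matching the x87
 * convention that SF with C1=1 means stack overflow and SF with C1=0 means
 * stack underflow:
 *
 *      iemFpuStackPushUnderflow*:    FSW |= IE | SF          (C1 left clear)
 *      iemFpuStackPushOverflowOnly:  FSW |= C1 | IE | SF     (C1 set)
 */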
7738
7739
7740/**
7741 * Raises an FPU stack overflow exception on a push.
7742 *
7743 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7744 */
7745DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPUCC pVCpu)
7746{
7747 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7748 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7749 iemFpuStackPushOverflowOnly(pFpuCtx);
7750}
7751
7752
7753/**
7754 * Raises an FPU stack overflow exception on a push with a memory operand.
7755 *
7756 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7757 * @param iEffSeg The effective memory operand selector register.
7758 * @param GCPtrEff The effective memory operand offset.
7759 */
7760DECL_NO_INLINE(IEM_STATIC, void)
7761iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7762{
7763 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7764 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7765 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7766 iemFpuStackPushOverflowOnly(pFpuCtx);
7767}
7768
7769
7770IEM_STATIC int iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg)
7771{
7772 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7773 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7774 if (pFpuCtx->FTW & RT_BIT(iReg))
7775 return VINF_SUCCESS;
7776 return VERR_NOT_FOUND;
7777}
7778
7779
7780IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7781{
7782 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7783 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7784 if (pFpuCtx->FTW & RT_BIT(iReg))
7785 {
7786 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7787 return VINF_SUCCESS;
7788 }
7789 return VERR_NOT_FOUND;
7790}
7791
7792
7793IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7794 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7795{
7796 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7797 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7798 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7799 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7800 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7801 {
7802 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7803 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7804 return VINF_SUCCESS;
7805 }
7806 return VERR_NOT_FOUND;
7807}
7808
7809
7810IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7811{
7812 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
7813 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7814 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7815 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7816 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7817 {
7818 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7819 return VINF_SUCCESS;
7820 }
7821 return VERR_NOT_FOUND;
7822}
7823
7824
7825/**
7826 * Updates the FPU exception status after FCW is changed.
7827 *
7828 * @param pFpuCtx The FPU context.
7829 */
7830IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7831{
7832 uint16_t u16Fsw = pFpuCtx->FSW;
7833 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7834 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7835 else
7836 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7837 pFpuCtx->FSW = u16Fsw;
7838}
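
/* Illustrative sketch, not from the original source: a worked example of the
 * recalculation above, assuming the usual layout where the low six bits of FSW
 * are the exception status bits and the low six bits of FCW the corresponding
 * mask bits:
 *
 *      FSW = 0x0020 (PE pending), FCW = 0x037f (all masked)
 *          -> (FSW & XCPT) & ~(FCW & XCPT) == 0       -> ES and B cleared
 *      FSW = 0x0020 (PE pending), FCW = 0x035f (PE unmasked)
 *          -> (FSW & XCPT) & ~(FCW & XCPT) == 0x20    -> ES and B set
 */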
7839
7840
7841/**
7842 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7843 *
7844 * @returns The full FTW.
7845 * @param pFpuCtx The FPU context.
7846 */
7847IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7848{
7849 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7850 uint16_t u16Ftw = 0;
7851 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7852 for (unsigned iSt = 0; iSt < 8; iSt++)
7853 {
7854 unsigned const iReg = (iSt + iTop) & 7;
7855 if (!(u8Ftw & RT_BIT(iReg)))
7856 u16Ftw |= 3 << (iReg * 2); /* empty */
7857 else
7858 {
7859 uint16_t uTag;
7860 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7861 if (pr80Reg->s.uExponent == 0x7fff)
7862 uTag = 2; /* Exponent is all 1's => Special. */
7863 else if (pr80Reg->s.uExponent == 0x0000)
7864 {
7865 if (pr80Reg->s.u64Mantissa == 0x0000)
7866 uTag = 1; /* All bits are zero => Zero. */
7867 else
7868 uTag = 2; /* Must be special. */
7869 }
7870 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7871 uTag = 0; /* Valid. */
7872 else
7873 uTag = 2; /* Must be special. */
7874
7875 u16Ftw |= uTag << (iReg * 2); /* valid, zero or special */
7876 }
7877 }
7878
7879 return u16Ftw;
7880}
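
/* Illustrative sketch, not from the original source: the two-bit tag values the
 * loop above produces, with one worked example:
 *
 *      00 valid    - exponent neither all ones nor zero, J bit set
 *      01 zero     - exponent zero and mantissa zero
 *      10 special  - exponent all ones (NaN/infinity), or J bit clear
 *                    (denormal/unnormal)
 *      11 empty    - the abridged FTW bit for the register is clear
 *
 * E.g. with TOP=7 and only ST(0) holding +1.0, the abridged FTW is
 * RT_BIT(7) = 0x80 and the full FTW comes out as 0x3fff: register 7 is tagged
 * valid (00), everything else empty (11).
 */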
7881
7882
7883/**
7884 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7885 *
7886 * @returns The compressed FTW.
7887 * @param u16FullFtw The full FTW to convert.
7888 */
7889IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7890{
7891 uint8_t u8Ftw = 0;
7892 for (unsigned i = 0; i < 8; i++)
7893 {
7894 if ((u16FullFtw & 3) != 3 /*empty*/)
7895 u8Ftw |= RT_BIT(i);
7896 u16FullFtw >>= 2;
7897 }
7898
7899 return u8Ftw;
7900}
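
/* Illustrative note, not from the original source: the compression above keeps
 * one bit per register, set for anything not tagged empty (3). Continuing the
 * example after iemFpuCalcFullFtw, a full FTW of 0x3fff (register 7 valid, the
 * rest empty) compresses back to 0x80, i.e. only RT_BIT(7) set; the
 * valid/zero/special distinction is recomputed from the register contents
 * whenever the full form is needed again.
 */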
7901
7902/** @} */
7903
7904
7905/** @name Memory access.
7906 *
7907 * @{
7908 */
7909
7910
7911/**
7912 * Updates the IEMCPU::cbWritten counter if applicable.
7913 *
7914 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7915 * @param fAccess The access being accounted for.
7916 * @param cbMem The access size.
7917 */
7918DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
7919{
7920 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7921 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7922 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7923}
7924
7925
7926/**
7927 * Checks if the given segment can be written to, raising the appropriate
7928 * exception if not.
7929 *
7930 * @returns VBox strict status code.
7931 *
7932 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7933 * @param pHid Pointer to the hidden register.
7934 * @param iSegReg The register number.
7935 * @param pu64BaseAddr Where to return the base address to use for the
7936 * segment. (In 64-bit code it may differ from the
7937 * base in the hidden segment.)
7938 */
7939IEM_STATIC VBOXSTRICTRC
7940iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7941{
7942 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7943
7944 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7945 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7946 else
7947 {
7948 if (!pHid->Attr.n.u1Present)
7949 {
7950 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7951 AssertRelease(uSel == 0);
7952 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7953 return iemRaiseGeneralProtectionFault0(pVCpu);
7954 }
7955
7956 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7957 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7958 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7959 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7960 *pu64BaseAddr = pHid->u64Base;
7961 }
7962 return VINF_SUCCESS;
7963}
7964
7965
7966/**
7967 * Checks if the given segment can be read from, raising the appropriate
7968 * exception if not.
7969 *
7970 * @returns VBox strict status code.
7971 *
7972 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7973 * @param pHid Pointer to the hidden register.
7974 * @param iSegReg The register number.
7975 * @param pu64BaseAddr Where to return the base address to use for the
7976 * segment. (In 64-bit code it may differ from the
7977 * base in the hidden segment.)
7978 */
7979IEM_STATIC VBOXSTRICTRC
7980iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7981{
7982 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7983
7984 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7985 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7986 else
7987 {
7988 if (!pHid->Attr.n.u1Present)
7989 {
7990 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7991 AssertRelease(uSel == 0);
7992 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7993 return iemRaiseGeneralProtectionFault0(pVCpu);
7994 }
7995
7996 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7997 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7998 *pu64BaseAddr = pHid->u64Base;
7999 }
8000 return VINF_SUCCESS;
8001}
8002
8003
8004/**
8005 * Applies the segment limit, base and attributes.
8006 *
8007 * This may raise a \#GP or \#SS.
8008 *
8009 * @returns VBox strict status code.
8010 *
8011 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8012 * @param fAccess The kind of access which is being performed.
8013 * @param iSegReg The index of the segment register to apply.
8014 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8015 * TSS, ++).
8016 * @param cbMem The access size.
8017 * @param pGCPtrMem Pointer to the guest memory address to apply
8018 * segmentation to. Input and output parameter.
8019 */
8020IEM_STATIC VBOXSTRICTRC
8021iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8022{
8023 if (iSegReg == UINT8_MAX)
8024 return VINF_SUCCESS;
8025
8026 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8027 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8028 switch (pVCpu->iem.s.enmCpuMode)
8029 {
8030 case IEMMODE_16BIT:
8031 case IEMMODE_32BIT:
8032 {
8033 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8034 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8035
8036 if ( pSel->Attr.n.u1Present
8037 && !pSel->Attr.n.u1Unusable)
8038 {
8039 Assert(pSel->Attr.n.u1DescType);
8040 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8041 {
8042 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8043 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8044 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8045
8046 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8047 {
8048 /** @todo CPL check. */
8049 }
8050
8051 /*
8052 * There are two kinds of data selectors, normal and expand down.
8053 */
8054 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8055 {
8056 if ( GCPtrFirst32 > pSel->u32Limit
8057 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8058 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8059 }
8060 else
8061 {
8062 /*
8063 * The upper boundary is defined by the B bit, not the G bit!
8064 */
8065 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8066 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8067 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8068 }
8069 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8070 }
8071 else
8072 {
8073
8074 /*
8075 * A code selector can usually be used to read through; writing is
8076 * only permitted in real and V8086 mode.
8077 */
8078 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8079 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8080 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8081 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8082 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8083
8084 if ( GCPtrFirst32 > pSel->u32Limit
8085 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8086 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8087
8088 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8089 {
8090 /** @todo CPL check. */
8091 }
8092
8093 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8094 }
8095 }
8096 else
8097 return iemRaiseGeneralProtectionFault0(pVCpu);
8098 return VINF_SUCCESS;
8099 }
8100
8101 case IEMMODE_64BIT:
8102 {
8103 RTGCPTR GCPtrMem = *pGCPtrMem;
8104 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8105 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8106
8107 Assert(cbMem >= 1);
8108 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8109 return VINF_SUCCESS;
8110 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8111 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8112 return iemRaiseGeneralProtectionFault0(pVCpu);
8113 }
8114
8115 default:
8116 AssertFailedReturn(VERR_IEM_IPE_7);
8117 }
8118}
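
/* Illustrative sketch, not from the original source: a worked example of the
 * expand-down check above. For a 32-bit (B=1) expand-down data segment with
 * u32Limit = 0x0fff, the valid offsets are 0x1000..0xffffffff:
 *
 *      GCPtrFirst32 = 0x00001000, cbMem = 4  -> allowed
 *      GCPtrFirst32 = 0x00000fff, cbMem = 1  -> iemRaiseSelectorBounds
 *
 * whereas for a normal (expand-up) data segment the whole access must lie at or
 * below u32Limit.
 */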
8119
8120
8121/**
8122 * Translates a virtual address to a physical address and checks if we
8123 * can access the page as specified.
8124 *
8125 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8126 * @param GCPtrMem The virtual address.
8127 * @param fAccess The intended access.
8128 * @param pGCPhysMem Where to return the physical address.
8129 */
8130IEM_STATIC VBOXSTRICTRC
8131iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8132{
8133 /** @todo Need a different PGM interface here. We're currently using
8134 * generic / REM interfaces. This won't cut it for R0. */
8135 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8136 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
8137 * here. */
8138 RTGCPHYS GCPhys;
8139 uint64_t fFlags;
8140 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &fFlags, &GCPhys);
8141 if (RT_FAILURE(rc))
8142 {
8143 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8144 /** @todo Check unassigned memory in unpaged mode. */
8145 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8146 *pGCPhysMem = NIL_RTGCPHYS;
8147 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8148 }
8149
8150 /* If the page is writable, user-accessible and does not have the no-exec bit
8151 set, all access is allowed. Otherwise we'll have to check more carefully... */
8152 if ((fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8153 {
8154 /* Write to read only memory? */
8155 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8156 && !(fFlags & X86_PTE_RW)
8157 && ( ( pVCpu->iem.s.uCpl == 3
8158 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8159 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8160 {
8161 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8162 *pGCPhysMem = NIL_RTGCPHYS;
8163 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8164 }
8165
8166 /* Kernel memory accessed by userland? */
8167 if ( !(fFlags & X86_PTE_US)
8168 && pVCpu->iem.s.uCpl == 3
8169 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8170 {
8171 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8172 *pGCPhysMem = NIL_RTGCPHYS;
8173 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8174 }
8175
8176 /* Executing non-executable memory? */
8177 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8178 && (fFlags & X86_PTE_PAE_NX)
8179 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8180 {
8181 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8182 *pGCPhysMem = NIL_RTGCPHYS;
8183 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8184 VERR_ACCESS_DENIED);
8185 }
8186 }
8187
8188 /*
8189 * Set the dirty / access flags.
8190 * ASSUMES this is set when the address is translated rather than on commit...
8191 */
8192 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8193 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8194 if ((fFlags & fAccessedDirty) != fAccessedDirty)
8195 {
8196 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8197 AssertRC(rc2);
8198 }
8199
8200 GCPhys |= GCPtrMem & PAGE_OFFSET_MASK;
8201 *pGCPhysMem = GCPhys;
8202 return VINF_SUCCESS;
8203}
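
/* Illustrative sketch, not from the original source: what the fast path in the
 * function above tests. A PTE for which
 *
 *      (fFlags & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) == (X86_PTE_RW | X86_PTE_US)
 *
 * i.e. writable, user-accessible and not no-execute, permits any access from any
 * CPL, so the detailed write/user/NX checks only run when one of those three
 * properties is missing. E.g. a CPL-3 (non-system) write to a page without
 * X86_PTE_RW takes the #PF path above regardless of CR0.WP.
 */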
8204
8205
8206
8207/**
8208 * Maps a physical page.
8209 *
8210 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8212 * @param GCPhysMem The physical address.
8213 * @param fAccess The intended access.
8214 * @param ppvMem Where to return the mapping address.
8215 * @param pLock The PGM lock.
8216 */
8217IEM_STATIC int iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8218{
8219#ifdef IEM_LOG_MEMORY_WRITES
8220 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8221 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8222#endif
8223
8224 /** @todo This API may require some improving later. A private deal with PGM
8225 * regarding locking and unlocking needs to be struck. A couple of TLBs
8226 * living in PGM, but with publicly accessible inlined access methods
8227 * could perhaps be an even better solution. */
8228 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8229 GCPhysMem,
8230 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8231 pVCpu->iem.s.fBypassHandlers,
8232 ppvMem,
8233 pLock);
8234 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8235 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8236
8237 return rc;
8238}
8239
8240
8241/**
8242 * Unmap a page previously mapped by iemMemPageMap.
8243 *
8244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8245 * @param GCPhysMem The physical address.
8246 * @param fAccess The intended access.
8247 * @param pvMem What iemMemPageMap returned.
8248 * @param pLock The PGM lock.
8249 */
8250DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8251{
8252 NOREF(pVCpu);
8253 NOREF(GCPhysMem);
8254 NOREF(fAccess);
8255 NOREF(pvMem);
8256 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8257}
8258
8259
8260/**
8261 * Looks up a memory mapping entry.
8262 *
8263 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8265 * @param pvMem The memory address.
8266 * @param fAccess The kind of access.
8267 */
8268DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8269{
8270 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8271 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8272 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8273 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8274 return 0;
8275 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8276 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8277 return 1;
8278 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8279 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8280 return 2;
8281 return VERR_NOT_FOUND;
8282}
8283
8284
8285/**
8286 * Finds a free memmap entry when using iNextMapping doesn't work.
8287 *
8288 * @returns Memory mapping index, 1024 on failure.
8289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8290 */
8291IEM_STATIC unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
8292{
8293 /*
8294 * The easy case.
8295 */
8296 if (pVCpu->iem.s.cActiveMappings == 0)
8297 {
8298 pVCpu->iem.s.iNextMapping = 1;
8299 return 0;
8300 }
8301
8302 /* There should be enough mappings for all instructions. */
8303 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8304
8305 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8306 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8307 return i;
8308
8309 AssertFailedReturn(1024);
8310}
8311
8312
8313/**
8314 * Commits a bounce buffer that needs writing back and unmaps it.
8315 *
8316 * @returns Strict VBox status code.
8317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8318 * @param iMemMap The index of the buffer to commit.
8319 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8320 * Always false in ring-3, obviously.
8321 */
8322IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
8323{
8324 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8325 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8326#ifdef IN_RING3
8327 Assert(!fPostponeFail);
8328 RT_NOREF_PV(fPostponeFail);
8329#endif
8330
8331 /*
8332 * Do the writing.
8333 */
8334 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8335 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8336 {
8337 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8338 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8339 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8340 if (!pVCpu->iem.s.fBypassHandlers)
8341 {
8342 /*
8343 * Carefully and efficiently dealing with access handler return
8344 * codes makes this a little bloated.
8345 */
8346 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8347 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8348 pbBuf,
8349 cbFirst,
8350 PGMACCESSORIGIN_IEM);
8351 if (rcStrict == VINF_SUCCESS)
8352 {
8353 if (cbSecond)
8354 {
8355 rcStrict = PGMPhysWrite(pVM,
8356 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8357 pbBuf + cbFirst,
8358 cbSecond,
8359 PGMACCESSORIGIN_IEM);
8360 if (rcStrict == VINF_SUCCESS)
8361 { /* nothing */ }
8362 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8363 {
8364 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8365 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8366 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8367 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8368 }
8369#ifndef IN_RING3
8370 else if (fPostponeFail)
8371 {
8372 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8373 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8374 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8375 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8376 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8377 return iemSetPassUpStatus(pVCpu, rcStrict);
8378 }
8379#endif
8380 else
8381 {
8382 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8383 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8384 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8385 return rcStrict;
8386 }
8387 }
8388 }
8389 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8390 {
8391 if (!cbSecond)
8392 {
8393 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8394 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8395 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8396 }
8397 else
8398 {
8399 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8401 pbBuf + cbFirst,
8402 cbSecond,
8403 PGMACCESSORIGIN_IEM);
8404 if (rcStrict2 == VINF_SUCCESS)
8405 {
8406 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8407 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8408 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8409 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8410 }
8411 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8412 {
8413 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8414 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8415 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8416 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8417 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8418 }
8419#ifndef IN_RING3
8420 else if (fPostponeFail)
8421 {
8422 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8423 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8425 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8426 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8427 return iemSetPassUpStatus(pVCpu, rcStrict);
8428 }
8429#endif
8430 else
8431 {
8432 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8433 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8435 return rcStrict2;
8436 }
8437 }
8438 }
8439#ifndef IN_RING3
8440 else if (fPostponeFail)
8441 {
8442 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8443 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8444 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8445 if (!cbSecond)
8446 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8447 else
8448 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8449 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8450 return iemSetPassUpStatus(pVCpu, rcStrict);
8451 }
8452#endif
8453 else
8454 {
8455 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8456 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8457 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8458 return rcStrict;
8459 }
8460 }
8461 else
8462 {
8463 /*
8464 * No access handlers, much simpler.
8465 */
8466 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8467 if (RT_SUCCESS(rc))
8468 {
8469 if (cbSecond)
8470 {
8471 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8472 if (RT_SUCCESS(rc))
8473 { /* likely */ }
8474 else
8475 {
8476 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8477 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8479 return rc;
8480 }
8481 }
8482 }
8483 else
8484 {
8485 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8486 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8487 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8488 return rc;
8489 }
8490 }
8491 }
8492
8493#if defined(IEM_LOG_MEMORY_WRITES)
8494 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8495 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8496 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8497 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8498 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8499 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8500
8501 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8502 g_cbIemWrote = cbWrote;
8503 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8504#endif
8505
8506 /*
8507 * Free the mapping entry.
8508 */
8509 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8510 Assert(pVCpu->iem.s.cActiveMappings != 0);
8511 pVCpu->iem.s.cActiveMappings--;
8512 return VINF_SUCCESS;
8513}
8514
8515
8516/**
8517 * iemMemMap worker that deals with a request crossing pages.
8518 */
8519IEM_STATIC VBOXSTRICTRC
8520iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8521{
8522 /*
8523 * Do the address translations.
8524 */
8525 RTGCPHYS GCPhysFirst;
8526 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8527 if (rcStrict != VINF_SUCCESS)
8528 return rcStrict;
8529
8530 RTGCPHYS GCPhysSecond;
8531 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8532 fAccess, &GCPhysSecond);
8533 if (rcStrict != VINF_SUCCESS)
8534 return rcStrict;
8535 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8536
8537 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8538
8539 /*
8540 * Read in the current memory content if it's a read, execute or partial
8541 * write access.
8542 */
8543 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8544 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8545 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8546
8547 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8548 {
8549 if (!pVCpu->iem.s.fBypassHandlers)
8550 {
8551 /*
8552 * Must carefully deal with access handler status codes here,
8553 * which makes the code a bit bloated.
8554 */
8555 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8556 if (rcStrict == VINF_SUCCESS)
8557 {
8558 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8559 if (rcStrict == VINF_SUCCESS)
8560 { /*likely */ }
8561 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8562 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8563 else
8564 {
8565 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict=%Rrc (!!)\n",
8566 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8567 return rcStrict;
8568 }
8569 }
8570 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8571 {
8572 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8573 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8574 {
8575 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8576 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8577 }
8578 else
8579 {
8580 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8581 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8582 return rcStrict2;
8583 }
8584 }
8585 else
8586 {
8587 Log(("iemMemBounceBufferMapCrossPage: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8588 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8589 return rcStrict;
8590 }
8591 }
8592 else
8593 {
8594 /*
8595 * No informational status codes here, much more straightforward.
8596 */
8597 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8598 if (RT_SUCCESS(rc))
8599 {
8600 Assert(rc == VINF_SUCCESS);
8601 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8602 if (RT_SUCCESS(rc))
8603 Assert(rc == VINF_SUCCESS);
8604 else
8605 {
8606 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8607 return rc;
8608 }
8609 }
8610 else
8611 {
8612 Log(("iemMemBounceBufferMapCrossPage: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8613 return rc;
8614 }
8615 }
8616 }
8617#ifdef VBOX_STRICT
8618 else
8619 memset(pbBuf, 0xcc, cbMem);
8620 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8621 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8622#endif
8623
8624 /*
8625 * Commit the bounce buffer entry.
8626 */
8627 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8628 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8629 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8630 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8631 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8632 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8633 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8634 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8635 pVCpu->iem.s.cActiveMappings++;
8636
8637 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8638 *ppvMem = pbBuf;
8639 return VINF_SUCCESS;
8640}
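
/* Illustrative sketch, not from the original source: how the split above works
 * out for a concrete access, assuming 4 KiB pages (PAGE_SIZE = 0x1000):
 *
 *      GCPtrFirst offset within the page = 0xffe, cbMem = 4
 *          cbFirstPage  = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK) = 2
 *          cbSecondPage = cbMem - cbFirstPage                          = 2
 *
 * Bytes 0-1 of the bounce buffer shadow the tail of the first page and bytes
 * 2-3 the start of the second; on commit they are written back as two separate
 * PGMPhysWrite (or PGMPhysSimpleWriteGCPhys) calls.
 */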
8641
8642
8643/**
8644 * iemMemMap worker that deals with iemMemPageMap failures.
8645 */
8646IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8647 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8648{
8649 /*
8650 * Filter out conditions we can handle and the ones which shouldn't happen.
8651 */
8652 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8653 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8654 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8655 {
8656 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8657 return rcMap;
8658 }
8659 pVCpu->iem.s.cPotentialExits++;
8660
8661 /*
8662 * Read in the current memory content if it's a read, execute or partial
8663 * write access.
8664 */
8665 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8666 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8667 {
8668 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8669 memset(pbBuf, 0xff, cbMem);
8670 else
8671 {
8672 int rc;
8673 if (!pVCpu->iem.s.fBypassHandlers)
8674 {
8675 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8676 if (rcStrict == VINF_SUCCESS)
8677 { /* nothing */ }
8678 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8679 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8680 else
8681 {
8682 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8683 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8684 return rcStrict;
8685 }
8686 }
8687 else
8688 {
8689 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8690 if (RT_SUCCESS(rc))
8691 { /* likely */ }
8692 else
8693 {
8694 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8695 GCPhysFirst, rc));
8696 return rc;
8697 }
8698 }
8699 }
8700 }
8701#ifdef VBOX_STRICT
8702 else
8703 memset(pbBuf, 0xcc, cbMem);
8704#endif
8705#ifdef VBOX_STRICT
8706 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8707 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8708#endif
8709
8710 /*
8711 * Commit the bounce buffer entry.
8712 */
8713 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8714 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8715 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8716 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8717 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8718 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8719 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8720 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8721 pVCpu->iem.s.cActiveMappings++;
8722
8723 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8724 *ppvMem = pbBuf;
8725 return VINF_SUCCESS;
8726}
8727
8728
8729
8730/**
8731 * Maps the specified guest memory for the given kind of access.
8732 *
8733 * This may be using bounce buffering of the memory if it's crossing a page
8734 * boundary or if there is an access handler installed for any of it. Because
8735 * of lock prefix guarantees, we're in for some extra clutter when this
8736 * happens.
8737 *
8738 * This may raise a \#GP, \#SS, \#PF or \#AC.
8739 *
8740 * @returns VBox strict status code.
8741 *
8742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8743 * @param ppvMem Where to return the pointer to the mapped
8744 * memory.
8745 * @param cbMem The number of bytes to map. This is usually 1,
8746 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8747 * string operations it can be up to a page.
8748 * @param iSegReg The index of the segment register to use for
8749 * this access. The base and limits are checked.
8750 * Use UINT8_MAX to indicate that no segmentation
8751 * is required (for IDT, GDT and LDT accesses).
8752 * @param GCPtrMem The address of the guest memory.
8753 * @param fAccess How the memory is being accessed. The
8754 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8755 * how to map the memory, while the
8756 * IEM_ACCESS_WHAT_XXX bit is used when raising
8757 * exceptions.
8758 */
8759IEM_STATIC VBOXSTRICTRC
8760iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8761{
8762 /*
8763 * Check the input and figure out which mapping entry to use.
8764 */
8765 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8766 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8767 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8768
8769 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8770 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8771 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8772 {
8773 iMemMap = iemMemMapFindFree(pVCpu);
8774 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8775 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8776 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8777 pVCpu->iem.s.aMemMappings[2].fAccess),
8778 VERR_IEM_IPE_9);
8779 }
8780
8781 /*
8782 * Map the memory, checking that we can actually access it. If something
8783 * slightly complicated happens, fall back on bounce buffering.
8784 */
8785 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8786 if (rcStrict != VINF_SUCCESS)
8787 return rcStrict;
8788
8789 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8790 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8791
8792 RTGCPHYS GCPhysFirst;
8793 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8794 if (rcStrict != VINF_SUCCESS)
8795 return rcStrict;
8796
8797 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8798 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8799 if (fAccess & IEM_ACCESS_TYPE_READ)
8800 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8801
8802 void *pvMem;
8803 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8804 if (rcStrict != VINF_SUCCESS)
8805 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8806
8807 /*
8808 * Fill in the mapping table entry.
8809 */
8810 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8811 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8812 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8813 pVCpu->iem.s.cActiveMappings++;
8814
8815 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8816 *ppvMem = pvMem;
8817
8818 return VINF_SUCCESS;
8819}
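
/* Illustrative sketch, not from the original source: the page-crossing test in
 * iemMemMap in numbers, assuming 4 KiB pages and cbMem = 4:
 *
 *      GCPtrMem & PAGE_OFFSET_MASK = 0xffc  -> 0xffc + 4 = 0x1000, direct map
 *      GCPtrMem & PAGE_OFFSET_MASK = 0xffd  -> 0xffd + 4 = 0x1001, bounce
 *                                              buffered via MapCrossPage
 *
 * The typical caller pattern is map, access the returned pointer, then
 * iemMemCommitAndUnmap; see iemMemFetchDataU8 further down for an example.
 */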
8820
8821
8822/**
8823 * Commits the guest memory if bounce buffered and unmaps it.
8824 *
8825 * @returns Strict VBox status code.
8826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8827 * @param pvMem The mapping.
8828 * @param fAccess The kind of access.
8829 */
8830IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8831{
8832 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8833 AssertReturn(iMemMap >= 0, iMemMap);
8834
8835 /* If it's bounce buffered, we may need to write back the buffer. */
8836 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8837 {
8838 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8839 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8840 }
8841 /* Otherwise unlock it. */
8842 else
8843 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8844
8845 /* Free the entry. */
8846 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8847 Assert(pVCpu->iem.s.cActiveMappings != 0);
8848 pVCpu->iem.s.cActiveMappings--;
8849 return VINF_SUCCESS;
8850}
8851
8852#ifdef IEM_WITH_SETJMP
8853
8854/**
8855 * Maps the specified guest memory for the given kind of access, longjmp on
8856 * error.
8857 *
8858 * This may be using bounce buffering of the memory if it's crossing a page
8859 * boundary or if there is an access handler installed for any of it. Because
8860 * of lock prefix guarantees, we're in for some extra clutter when this
8861 * happens.
8862 *
8863 * This may raise a \#GP, \#SS, \#PF or \#AC.
8864 *
8865 * @returns Pointer to the mapped memory.
8866 *
8867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8868 * @param cbMem The number of bytes to map. This is usually 1,
8869 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8870 * string operations it can be up to a page.
8871 * @param iSegReg The index of the segment register to use for
8872 * this access. The base and limits are checked.
8873 * Use UINT8_MAX to indicate that no segmentation
8874 * is required (for IDT, GDT and LDT accesses).
8875 * @param GCPtrMem The address of the guest memory.
8876 * @param fAccess How the memory is being accessed. The
8877 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8878 * how to map the memory, while the
8879 * IEM_ACCESS_WHAT_XXX bit is used when raising
8880 * exceptions.
8881 */
8882IEM_STATIC void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8883{
8884 /*
8885 * Check the input and figure out which mapping entry to use.
8886 */
8887 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8888 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8889 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8890
8891 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8892 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8893 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8894 {
8895 iMemMap = iemMemMapFindFree(pVCpu);
8896 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8897 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8898 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8899 pVCpu->iem.s.aMemMappings[2].fAccess),
8900 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8901 }
8902
8903 /*
8904 * Map the memory, checking that we can actually access it. If something
8905 * slightly complicated happens, fall back on bounce buffering.
8906 */
8907 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8908 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8909 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8910
8911 /* Crossing a page boundary? */
8912 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8913 { /* No (likely). */ }
8914 else
8915 {
8916 void *pvMem;
8917 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8918 if (rcStrict == VINF_SUCCESS)
8919 return pvMem;
8920 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8921 }
8922
8923 RTGCPHYS GCPhysFirst;
8924 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8925 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8926 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8927
8928 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8929 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8930 if (fAccess & IEM_ACCESS_TYPE_READ)
8931 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8932
8933 void *pvMem;
8934 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8935 if (rcStrict == VINF_SUCCESS)
8936 { /* likely */ }
8937 else
8938 {
8939 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8940 if (rcStrict == VINF_SUCCESS)
8941 return pvMem;
8942 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8943 }
8944
8945 /*
8946 * Fill in the mapping table entry.
8947 */
8948 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8949 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8950 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8951 pVCpu->iem.s.cActiveMappings++;
8952
8953 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8954 return pvMem;
8955}
8956
8957
8958/**
8959 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8960 *
8961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8962 * @param pvMem The mapping.
8963 * @param fAccess The kind of access.
8964 */
8965IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8966{
8967 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8968 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8969
8970 /* If it's bounce buffered, we may need to write back the buffer. */
8971 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8972 {
8973 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8974 {
8975 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8976 if (rcStrict == VINF_SUCCESS)
8977 return;
8978 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8979 }
8980 }
8981 /* Otherwise unlock it. */
8982 else
8983 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8984
8985 /* Free the entry. */
8986 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8987 Assert(pVCpu->iem.s.cActiveMappings != 0);
8988 pVCpu->iem.s.cActiveMappings--;
8989}
8990
8991#endif /* IEM_WITH_SETJMP */
8992
8993#ifndef IN_RING3
8994/**
8995 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8996 * buffer part shows trouble the write will be postponed to ring-3 (sets VMCPU_FF_IEM and the pending-write flags).
8997 *
8998 * Allows the instruction to be completed and retired, while the IEM user will
8999 * return to ring-3 immediately afterwards and do the postponed writes there.
9000 *
9001 * @returns VBox status code (no strict statuses). Caller must check
9002 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9004 * @param pvMem The mapping.
9005 * @param fAccess The kind of access.
9006 */
9007IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
9008{
9009 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9010 AssertReturn(iMemMap >= 0, iMemMap);
9011
9012 /* If it's bounce buffered, we may need to write back the buffer. */
9013 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9014 {
9015 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9016 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9017 }
9018 /* Otherwise unlock it. */
9019 else
9020 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9021
9022 /* Free the entry. */
9023 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9024 Assert(pVCpu->iem.s.cActiveMappings != 0);
9025 pVCpu->iem.s.cActiveMappings--;
9026 return VINF_SUCCESS;
9027}
9028#endif
9029
9030
9031/**
9032 * Rolls back mappings, releasing page locks and such.
9033 *
9034 * The caller shall only call this after checking cActiveMappings.
9035 *
9037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9038 */
9039IEM_STATIC void iemMemRollback(PVMCPUCC pVCpu)
9040{
9041 Assert(pVCpu->iem.s.cActiveMappings > 0);
9042
9043 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9044 while (iMemMap-- > 0)
9045 {
9046 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9047 if (fAccess != IEM_ACCESS_INVALID)
9048 {
9049 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9050 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9051 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9052 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9053 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9054 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9055 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9056 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9057 pVCpu->iem.s.cActiveMappings--;
9058 }
9059 }
9060}
9061
9062
9063/**
9064 * Fetches a data byte.
9065 *
9066 * @returns Strict VBox status code.
9067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9068 * @param pu8Dst Where to return the byte.
9069 * @param iSegReg The index of the segment register to use for
9070 * this access. The base and limits are checked.
9071 * @param GCPtrMem The address of the guest memory.
9072 */
9073IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9074{
9075 /* The lazy approach for now... */
9076 uint8_t const *pu8Src;
9077 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9078 if (rc == VINF_SUCCESS)
9079 {
9080 *pu8Dst = *pu8Src;
9081 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9082 }
9083 return rc;
9084}
9085
9086
9087#ifdef IEM_WITH_SETJMP
9088/**
9089 * Fetches a data byte, longjmp on error.
9090 *
9091 * @returns The byte.
9092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9093 * @param iSegReg The index of the segment register to use for
9094 * this access. The base and limits are checked.
9095 * @param GCPtrMem The address of the guest memory.
9096 */
9097DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9098{
9099 /* The lazy approach for now... */
9100 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9101 uint8_t const bRet = *pu8Src;
9102 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9103 return bRet;
9104}
9105#endif /* IEM_WITH_SETJMP */
9106
9107
9108/**
9109 * Fetches a data word.
9110 *
9111 * @returns Strict VBox status code.
9112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9113 * @param pu16Dst Where to return the word.
9114 * @param iSegReg The index of the segment register to use for
9115 * this access. The base and limits are checked.
9116 * @param GCPtrMem The address of the guest memory.
9117 */
9118IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9119{
9120 /* The lazy approach for now... */
9121 uint16_t const *pu16Src;
9122 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9123 if (rc == VINF_SUCCESS)
9124 {
9125 *pu16Dst = *pu16Src;
9126 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9127 }
9128 return rc;
9129}
9130
9131
9132#ifdef IEM_WITH_SETJMP
9133/**
9134 * Fetches a data word, longjmp on error.
9135 *
9136 * @returns The word.
9137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9138 * @param iSegReg The index of the segment register to use for
9139 * this access. The base and limits are checked.
9140 * @param GCPtrMem The address of the guest memory.
9141 */
9142DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9143{
9144 /* The lazy approach for now... */
9145 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9146 uint16_t const u16Ret = *pu16Src;
9147 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9148 return u16Ret;
9149}
9150#endif
9151
9152
9153/**
9154 * Fetches a data dword.
9155 *
9156 * @returns Strict VBox status code.
9157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9158 * @param pu32Dst Where to return the dword.
9159 * @param iSegReg The index of the segment register to use for
9160 * this access. The base and limits are checked.
9161 * @param GCPtrMem The address of the guest memory.
9162 */
9163IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9164{
9165 /* The lazy approach for now... */
9166 uint32_t const *pu32Src;
9167 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9168 if (rc == VINF_SUCCESS)
9169 {
9170 *pu32Dst = *pu32Src;
9171 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9172 }
9173 return rc;
9174}
9175
9176
9177/**
9178 * Fetches a data dword and zero extends it to a qword.
9179 *
9180 * @returns Strict VBox status code.
9181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9182 * @param pu64Dst Where to return the qword.
9183 * @param iSegReg The index of the segment register to use for
9184 * this access. The base and limits are checked.
9185 * @param GCPtrMem The address of the guest memory.
9186 */
9187IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9188{
9189 /* The lazy approach for now... */
9190 uint32_t const *pu32Src;
9191 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9192 if (rc == VINF_SUCCESS)
9193 {
9194 *pu64Dst = *pu32Src;
9195 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9196 }
9197 return rc;
9198}
9199
9200
9201#ifdef IEM_WITH_SETJMP
9202
9203IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9204{
9205 Assert(cbMem >= 1);
9206 Assert(iSegReg < X86_SREG_COUNT);
9207
9208 /*
9209 * 64-bit mode is simpler.
9210 */
9211 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9212 {
9213 if (iSegReg >= X86_SREG_FS)
9214 {
9215 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9216 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9217 GCPtrMem += pSel->u64Base;
9218 }
9219
9220 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9221 return GCPtrMem;
9222 }
9223 /*
9224 * 16-bit and 32-bit segmentation.
9225 */
9226 else
9227 {
9228 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9229 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9230 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9231 == X86DESCATTR_P /* data, expand up */
9232 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9233 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9234 {
9235 /* expand up */
9236 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9237 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9238 && GCPtrLast32 > (uint32_t)GCPtrMem))
9239 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9240 }
9241 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9242 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9243 {
9244 /* expand down */
9245 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9246 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9247 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9248 && GCPtrLast32 > (uint32_t)GCPtrMem))
9249 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9250 }
9251 else
9252 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9253 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9254 }
9255 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9256}
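
/*
 * Background sketch (example only, not compiled): the architectural rule the
 * expand-up / expand-down branches above implement.  For an expand-up data
 * segment the valid offsets are [0, limit]; for an expand-down data segment
 * they are (limit, 0xffff] or (limit, 0xffffffff] depending on the D/B bit.
 * The helper below is a self-contained restatement of that rule with invented
 * names; it is not the exact check performed by the function above.
 */
#if 0
static bool iemExampleOffsetInsideDataSeg(uint32_t off, uint32_t cb, uint32_t uLimit,
                                          bool fExpandDown, bool fDefBig)
{
    uint32_t const offLast = off + cb - 1;              /* last byte touched */
    if (offLast < off)                                  /* 32-bit wrap-around is never valid */
        return false;
    if (!fExpandDown)
        return offLast <= uLimit;                       /* expand up: [0, limit] */
    uint32_t const uUpperBound = fDefBig ? UINT32_MAX : UINT32_C(0xffff);
    return off > uLimit && offLast <= uUpperBound;      /* expand down: (limit, upper] */
}
#endif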
9257
9258
9259IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9260{
9261 Assert(cbMem >= 1);
9262 Assert(iSegReg < X86_SREG_COUNT);
9263
9264 /*
9265 * 64-bit mode is simpler.
9266 */
9267 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9268 {
9269 if (iSegReg >= X86_SREG_FS)
9270 {
9271 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9272 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9273 GCPtrMem += pSel->u64Base;
9274 }
9275
9276 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9277 return GCPtrMem;
9278 }
9279 /*
9280 * 16-bit and 32-bit segmentation.
9281 */
9282 else
9283 {
9284 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9285 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9286 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9287 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9288 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9289 {
9290 /* expand up */
9291 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9292 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9293 && GCPtrLast32 > (uint32_t)GCPtrMem))
9294 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9295 }
9296        else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9297 {
9298 /* expand down */
9299 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9300 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9301 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9302 && GCPtrLast32 > (uint32_t)GCPtrMem))
9303 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9304 }
9305 else
9306 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9307 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9308 }
9309 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9310}
9311
9312
9313/**
9314 * Fetches a data dword, longjmp on error, fallback/safe version.
9315 *
9316 * @returns The dword
9317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9318 * @param iSegReg The index of the segment register to use for
9319 * this access. The base and limits are checked.
9320 * @param GCPtrMem The address of the guest memory.
9321 */
9322IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9323{
9324 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9325 uint32_t const u32Ret = *pu32Src;
9326 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9327 return u32Ret;
9328}
9329
9330
9331/**
9332 * Fetches a data dword, longjmp on error.
9333 *
9334 * @returns The dword
9335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9336 * @param iSegReg The index of the segment register to use for
9337 * this access. The base and limits are checked.
9338 * @param GCPtrMem The address of the guest memory.
9339 */
9340DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9341{
9342# ifdef IEM_WITH_DATA_TLB
9343 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9344 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9345 {
9346 /// @todo more later.
9347 }
9348
9349 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9350# else
9351 /* The lazy approach. */
9352 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9353 uint32_t const u32Ret = *pu32Src;
9354 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9355 return u32Ret;
9356# endif
9357}
9358#endif
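
/*
 * Sketch (example only, not compiled): the page-straddle test used by the
 * IEM_WITH_DATA_TLB skeleton above.  An access of cb bytes stays within one
 * 4 KiB page exactly when its page offset is <= page size minus cb; anything
 * else has to take the safe fallback path.  The helper name is illustrative.
 */
#if 0
static bool iemExampleFitsInOnePage(RTGCPTR GCPtrEff, uint32_t cb)
{
    /* e.g. GCPtrEff=0x1ffe with cb=4 fails: bytes 0x1ffe..0x2001 straddle a page. */
    return (GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - cb;
}
#endif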
9359
9360
9361#ifdef SOME_UNUSED_FUNCTION
9362/**
9363 * Fetches a data dword and sign extends it to a qword.
9364 *
9365 * @returns Strict VBox status code.
9366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9367 * @param pu64Dst Where to return the sign extended value.
9368 * @param iSegReg The index of the segment register to use for
9369 * this access. The base and limits are checked.
9370 * @param GCPtrMem The address of the guest memory.
9371 */
9372IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9373{
9374 /* The lazy approach for now... */
9375 int32_t const *pi32Src;
9376 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9377 if (rc == VINF_SUCCESS)
9378 {
9379 *pu64Dst = *pi32Src;
9380 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9381 }
9382#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9383 else
9384 *pu64Dst = 0;
9385#endif
9386 return rc;
9387}
9388#endif
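
/*
 * Note (example only, not compiled): the fetcher above relies on the fact that
 * assigning an int32_t to a uint64_t sign-extends the value, and the GCC-only
 * branch merely zeroes the output on failure to silence maybe-uninitialized
 * warnings in callers.  A tiny self-contained restatement:
 */
#if 0
static uint64_t iemExampleSignExtendS32ToU64(int32_t i32)
{
    return (uint64_t)(int64_t)i32;  /* e.g. -1 becomes UINT64_C(0xffffffffffffffff) */
}
#endif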
9389
9390
9391/**
9392 * Fetches a data qword.
9393 *
9394 * @returns Strict VBox status code.
9395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9396 * @param pu64Dst Where to return the qword.
9397 * @param iSegReg The index of the segment register to use for
9398 * this access. The base and limits are checked.
9399 * @param GCPtrMem The address of the guest memory.
9400 */
9401IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9402{
9403 /* The lazy approach for now... */
9404 uint64_t const *pu64Src;
9405 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9406 if (rc == VINF_SUCCESS)
9407 {
9408 *pu64Dst = *pu64Src;
9409 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9410 }
9411 return rc;
9412}
9413
9414
9415#ifdef IEM_WITH_SETJMP
9416/**
9417 * Fetches a data qword, longjmp on error.
9418 *
9419 * @returns The qword.
9420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9421 * @param iSegReg The index of the segment register to use for
9422 * this access. The base and limits are checked.
9423 * @param GCPtrMem The address of the guest memory.
9424 */
9425DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9426{
9427 /* The lazy approach for now... */
9428 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9429 uint64_t const u64Ret = *pu64Src;
9430 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9431 return u64Ret;
9432}
9433#endif
9434
9435
9436/**
9437 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9438 *
9439 * @returns Strict VBox status code.
9440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9441 * @param pu64Dst Where to return the qword.
9442 * @param iSegReg The index of the segment register to use for
9443 * this access. The base and limits are checked.
9444 * @param GCPtrMem The address of the guest memory.
9445 */
9446IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9447{
9448 /* The lazy approach for now... */
9449 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9450 if (RT_UNLIKELY(GCPtrMem & 15))
9451 return iemRaiseGeneralProtectionFault0(pVCpu);
9452
9453 uint64_t const *pu64Src;
9454 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9455 if (rc == VINF_SUCCESS)
9456 {
9457 *pu64Dst = *pu64Src;
9458 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9459 }
9460 return rc;
9461}
9462
9463
9464#ifdef IEM_WITH_SETJMP
9465/**
9466 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9467 *
9468 * @returns The qword.
9469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9470 * @param iSegReg The index of the segment register to use for
9471 * this access. The base and limits are checked.
9472 * @param GCPtrMem The address of the guest memory.
9473 */
9474DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9475{
9476 /* The lazy approach for now... */
9477 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9478 if (RT_LIKELY(!(GCPtrMem & 15)))
9479 {
9480 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9481 uint64_t const u64Ret = *pu64Src;
9482 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9483 return u64Ret;
9484 }
9485
9486 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9487 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9488}
9489#endif
9490
9491
9492/**
9493 * Fetches a data tword.
9494 *
9495 * @returns Strict VBox status code.
9496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9497 * @param pr80Dst Where to return the tword.
9498 * @param iSegReg The index of the segment register to use for
9499 * this access. The base and limits are checked.
9500 * @param GCPtrMem The address of the guest memory.
9501 */
9502IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9503{
9504 /* The lazy approach for now... */
9505 PCRTFLOAT80U pr80Src;
9506 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9507 if (rc == VINF_SUCCESS)
9508 {
9509 *pr80Dst = *pr80Src;
9510 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9511 }
9512 return rc;
9513}
9514
9515
9516#ifdef IEM_WITH_SETJMP
9517/**
9518 * Fetches a data tword, longjmp on error.
9519 *
9520 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9521 * @param pr80Dst Where to return the tword.
9522 * @param iSegReg The index of the segment register to use for
9523 * this access. The base and limits are checked.
9524 * @param GCPtrMem The address of the guest memory.
9525 */
9526DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9527{
9528 /* The lazy approach for now... */
9529 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9530 *pr80Dst = *pr80Src;
9531 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9532}
9533#endif
9534
9535
9536/**
9537 * Fetches a data dqword (double qword), generally SSE related.
9538 *
9539 * @returns Strict VBox status code.
9540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9541 * @param   pu128Dst            Where to return the dqword.
9542 * @param iSegReg The index of the segment register to use for
9543 * this access. The base and limits are checked.
9544 * @param GCPtrMem The address of the guest memory.
9545 */
9546IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9547{
9548 /* The lazy approach for now... */
9549 PCRTUINT128U pu128Src;
9550 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9551 if (rc == VINF_SUCCESS)
9552 {
9553 pu128Dst->au64[0] = pu128Src->au64[0];
9554 pu128Dst->au64[1] = pu128Src->au64[1];
9555 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9556 }
9557 return rc;
9558}
9559
9560
9561#ifdef IEM_WITH_SETJMP
9562/**
9563 * Fetches a data dqword (double qword), generally SSE related.
9564 *
9565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9566 * @param   pu128Dst            Where to return the dqword.
9567 * @param iSegReg The index of the segment register to use for
9568 * this access. The base and limits are checked.
9569 * @param GCPtrMem The address of the guest memory.
9570 */
9571IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9572{
9573 /* The lazy approach for now... */
9574 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9575 pu128Dst->au64[0] = pu128Src->au64[0];
9576 pu128Dst->au64[1] = pu128Src->au64[1];
9577 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9578}
9579#endif
9580
9581
9582/**
9583 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9584 * related.
9585 *
9586 * Raises \#GP(0) if not aligned.
9587 *
9588 * @returns Strict VBox status code.
9589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9590 * @param   pu128Dst            Where to return the dqword.
9591 * @param iSegReg The index of the segment register to use for
9592 * this access. The base and limits are checked.
9593 * @param GCPtrMem The address of the guest memory.
9594 */
9595IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9596{
9597 /* The lazy approach for now... */
9598 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9599 if ( (GCPtrMem & 15)
9600 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9601 return iemRaiseGeneralProtectionFault0(pVCpu);
9602
9603 PCRTUINT128U pu128Src;
9604 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9605 if (rc == VINF_SUCCESS)
9606 {
9607 pu128Dst->au64[0] = pu128Src->au64[0];
9608 pu128Dst->au64[1] = pu128Src->au64[1];
9609 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9610 }
9611 return rc;
9612}
9613
9614
9615#ifdef IEM_WITH_SETJMP
9616/**
9617 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9618 * related, longjmp on error.
9619 *
9620 * Raises \#GP(0) if not aligned.
9621 *
9622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9623 * @param   pu128Dst            Where to return the dqword.
9624 * @param iSegReg The index of the segment register to use for
9625 * this access. The base and limits are checked.
9626 * @param GCPtrMem The address of the guest memory.
9627 */
9628DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9629{
9630 /* The lazy approach for now... */
9631 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9632 if ( (GCPtrMem & 15) == 0
9633 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9634 {
9635 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9636 pu128Dst->au64[0] = pu128Src->au64[0];
9637 pu128Dst->au64[1] = pu128Src->au64[1];
9638 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9639 return;
9640 }
9641
9642 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9643 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9644}
9645#endif
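
/*
 * Sketch (example only, not compiled): the alignment predicate used by the
 * aligned SSE fetch above (and by the aligned SSE store further down).  A
 * 16-byte SSE access normally requires 16-byte alignment; when the MXCSR.MM
 * bit is set, misaligned accesses are tolerated instead of raising a general
 * protection fault.  The helper name is invented for illustration.
 */
#if 0
static bool iemExampleSseAccessAllowed(RTGCPTR GCPtrMem, uint32_t fMxCsr)
{
    return (GCPtrMem & 15) == 0         /* naturally aligned, or ...              */
        || (fMxCsr & X86_MXCSR_MM);     /* ... the misaligned-access fault is masked. */
}
#endif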
9646
9647
9648/**
9649 * Fetches a data oword (octo word), generally AVX related.
9650 *
9651 * @returns Strict VBox status code.
9652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9653 * @param   pu256Dst            Where to return the oword.
9654 * @param iSegReg The index of the segment register to use for
9655 * this access. The base and limits are checked.
9656 * @param GCPtrMem The address of the guest memory.
9657 */
9658IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9659{
9660 /* The lazy approach for now... */
9661 PCRTUINT256U pu256Src;
9662 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9663 if (rc == VINF_SUCCESS)
9664 {
9665 pu256Dst->au64[0] = pu256Src->au64[0];
9666 pu256Dst->au64[1] = pu256Src->au64[1];
9667 pu256Dst->au64[2] = pu256Src->au64[2];
9668 pu256Dst->au64[3] = pu256Src->au64[3];
9669 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9670 }
9671 return rc;
9672}
9673
9674
9675#ifdef IEM_WITH_SETJMP
9676/**
9677 * Fetches a data oword (octo word), generally AVX related.
9678 *
9679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9680 * @param   pu256Dst            Where to return the oword.
9681 * @param iSegReg The index of the segment register to use for
9682 * this access. The base and limits are checked.
9683 * @param GCPtrMem The address of the guest memory.
9684 */
9685IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9686{
9687 /* The lazy approach for now... */
9688 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9689 pu256Dst->au64[0] = pu256Src->au64[0];
9690 pu256Dst->au64[1] = pu256Src->au64[1];
9691 pu256Dst->au64[2] = pu256Src->au64[2];
9692 pu256Dst->au64[3] = pu256Src->au64[3];
9693 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9694}
9695#endif
9696
9697
9698/**
9699 * Fetches a data oword (octo word) at an aligned address, generally AVX
9700 * related.
9701 *
9702 * Raises \#GP(0) if not aligned.
9703 *
9704 * @returns Strict VBox status code.
9705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9706 * @param   pu256Dst            Where to return the oword.
9707 * @param iSegReg The index of the segment register to use for
9708 * this access. The base and limits are checked.
9709 * @param GCPtrMem The address of the guest memory.
9710 */
9711IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9712{
9713 /* The lazy approach for now... */
9714 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9715 if (GCPtrMem & 31)
9716 return iemRaiseGeneralProtectionFault0(pVCpu);
9717
9718 PCRTUINT256U pu256Src;
9719 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9720 if (rc == VINF_SUCCESS)
9721 {
9722 pu256Dst->au64[0] = pu256Src->au64[0];
9723 pu256Dst->au64[1] = pu256Src->au64[1];
9724 pu256Dst->au64[2] = pu256Src->au64[2];
9725 pu256Dst->au64[3] = pu256Src->au64[3];
9726 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9727 }
9728 return rc;
9729}
9730
9731
9732#ifdef IEM_WITH_SETJMP
9733/**
9734 * Fetches a data oword (octo word) at an aligned address, generally AVX
9735 * related, longjmp on error.
9736 *
9737 * Raises \#GP(0) if not aligned.
9738 *
9739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9740 * @param   pu256Dst            Where to return the oword.
9741 * @param iSegReg The index of the segment register to use for
9742 * this access. The base and limits are checked.
9743 * @param GCPtrMem The address of the guest memory.
9744 */
9745DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9746{
9747 /* The lazy approach for now... */
9748 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9749 if ((GCPtrMem & 31) == 0)
9750 {
9751 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9752 pu256Dst->au64[0] = pu256Src->au64[0];
9753 pu256Dst->au64[1] = pu256Src->au64[1];
9754 pu256Dst->au64[2] = pu256Src->au64[2];
9755 pu256Dst->au64[3] = pu256Src->au64[3];
9756 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9757 return;
9758 }
9759
9760 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9761 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9762}
9763#endif
9764
9765
9766
9767/**
9768 * Fetches a descriptor register (lgdt, lidt).
9769 *
9770 * @returns Strict VBox status code.
9771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9772 * @param pcbLimit Where to return the limit.
9773 * @param pGCPtrBase Where to return the base.
9774 * @param iSegReg The index of the segment register to use for
9775 * this access. The base and limits are checked.
9776 * @param GCPtrMem The address of the guest memory.
9777 * @param enmOpSize The effective operand size.
9778 */
9779IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9780 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9781{
9782 /*
9783 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9784 * little special:
9785 * - The two reads are done separately.
9786 *      - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9787 * - We suspect the 386 to actually commit the limit before the base in
9788 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9789 *        don't try to emulate this eccentric behavior, because it's not well
9790 * enough understood and rather hard to trigger.
9791 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9792 */
9793 VBOXSTRICTRC rcStrict;
9794 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9795 {
9796 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9797 if (rcStrict == VINF_SUCCESS)
9798 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9799 }
9800 else
9801 {
9802        uint32_t uTmp = 0; /* (Visual C++ may otherwise warn that it is used uninitialized) */
9803 if (enmOpSize == IEMMODE_32BIT)
9804 {
9805 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9806 {
9807 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9808 if (rcStrict == VINF_SUCCESS)
9809 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9810 }
9811 else
9812 {
9813 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9814 if (rcStrict == VINF_SUCCESS)
9815 {
9816 *pcbLimit = (uint16_t)uTmp;
9817 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9818 }
9819 }
9820 if (rcStrict == VINF_SUCCESS)
9821 *pGCPtrBase = uTmp;
9822 }
9823 else
9824 {
9825 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9826 if (rcStrict == VINF_SUCCESS)
9827 {
9828 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9829 if (rcStrict == VINF_SUCCESS)
9830 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9831 }
9832 }
9833 }
9834 return rcStrict;
9835}
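
/*
 * Layout sketch (example only, not compiled): the memory operand read by the
 * function above.  LGDT/LIDT read a 16-bit limit at offset 0 and the base at
 * offset 2; with a 16-bit operand size only the low 24 bits of the base are
 * used, with a 32-bit operand size all 32 bits, and 64-bit mode uses a full
 * 64-bit base.  The packed struct below is purely illustrative.
 */
#if 0
# pragma pack(1)
typedef struct IEMEXAMPLEXDTRMEM
{
    uint16_t cbLimit;           /* offset 0: limit, always 16 bits                  */
    union
    {
        uint32_t u32Base;       /* offset 2: 24 or 32 bits used outside 64-bit mode */
        uint64_t u64Base;       /* offset 2: full 64 bits in 64-bit mode            */
    } u;
} IEMEXAMPLEXDTRMEM;
# pragma pack()
#endif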
9836
9837
9838
9839/**
9840 * Stores a data byte.
9841 *
9842 * @returns Strict VBox status code.
9843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9844 * @param iSegReg The index of the segment register to use for
9845 * this access. The base and limits are checked.
9846 * @param GCPtrMem The address of the guest memory.
9847 * @param u8Value The value to store.
9848 */
9849IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9850{
9851 /* The lazy approach for now... */
9852 uint8_t *pu8Dst;
9853 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9854 if (rc == VINF_SUCCESS)
9855 {
9856 *pu8Dst = u8Value;
9857 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9858 }
9859 return rc;
9860}
9861
9862
9863#ifdef IEM_WITH_SETJMP
9864/**
9865 * Stores a data byte, longjmp on error.
9866 *
9867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9868 * @param iSegReg The index of the segment register to use for
9869 * this access. The base and limits are checked.
9870 * @param GCPtrMem The address of the guest memory.
9871 * @param u8Value The value to store.
9872 */
9873IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9874{
9875 /* The lazy approach for now... */
9876 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9877 *pu8Dst = u8Value;
9878 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9879}
9880#endif
9881
9882
9883/**
9884 * Stores a data word.
9885 *
9886 * @returns Strict VBox status code.
9887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9888 * @param iSegReg The index of the segment register to use for
9889 * this access. The base and limits are checked.
9890 * @param GCPtrMem The address of the guest memory.
9891 * @param u16Value The value to store.
9892 */
9893IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9894{
9895 /* The lazy approach for now... */
9896 uint16_t *pu16Dst;
9897 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9898 if (rc == VINF_SUCCESS)
9899 {
9900 *pu16Dst = u16Value;
9901 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9902 }
9903 return rc;
9904}
9905
9906
9907#ifdef IEM_WITH_SETJMP
9908/**
9909 * Stores a data word, longjmp on error.
9910 *
9911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9912 * @param iSegReg The index of the segment register to use for
9913 * this access. The base and limits are checked.
9914 * @param GCPtrMem The address of the guest memory.
9915 * @param u16Value The value to store.
9916 */
9917IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9918{
9919 /* The lazy approach for now... */
9920 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9921 *pu16Dst = u16Value;
9922 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9923}
9924#endif
9925
9926
9927/**
9928 * Stores a data dword.
9929 *
9930 * @returns Strict VBox status code.
9931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9932 * @param iSegReg The index of the segment register to use for
9933 * this access. The base and limits are checked.
9934 * @param GCPtrMem The address of the guest memory.
9935 * @param u32Value The value to store.
9936 */
9937IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9938{
9939 /* The lazy approach for now... */
9940 uint32_t *pu32Dst;
9941 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9942 if (rc == VINF_SUCCESS)
9943 {
9944 *pu32Dst = u32Value;
9945 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9946 }
9947 return rc;
9948}
9949
9950
9951#ifdef IEM_WITH_SETJMP
9952/**
9953 * Stores a data dword, longjmp on error.
9954 *
9956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9957 * @param iSegReg The index of the segment register to use for
9958 * this access. The base and limits are checked.
9959 * @param GCPtrMem The address of the guest memory.
9960 * @param u32Value The value to store.
9961 */
9962IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9963{
9964 /* The lazy approach for now... */
9965 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9966 *pu32Dst = u32Value;
9967 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9968}
9969#endif
9970
9971
9972/**
9973 * Stores a data qword.
9974 *
9975 * @returns Strict VBox status code.
9976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9977 * @param iSegReg The index of the segment register to use for
9978 * this access. The base and limits are checked.
9979 * @param GCPtrMem The address of the guest memory.
9980 * @param u64Value The value to store.
9981 */
9982IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9983{
9984 /* The lazy approach for now... */
9985 uint64_t *pu64Dst;
9986 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9987 if (rc == VINF_SUCCESS)
9988 {
9989 *pu64Dst = u64Value;
9990 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9991 }
9992 return rc;
9993}
9994
9995
9996#ifdef IEM_WITH_SETJMP
9997/**
9998 * Stores a data qword, longjmp on error.
9999 *
10000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10001 * @param iSegReg The index of the segment register to use for
10002 * this access. The base and limits are checked.
10003 * @param GCPtrMem The address of the guest memory.
10004 * @param u64Value The value to store.
10005 */
10006IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10007{
10008 /* The lazy approach for now... */
10009 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10010 *pu64Dst = u64Value;
10011 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10012}
10013#endif
10014
10015
10016/**
10017 * Stores a data dqword.
10018 *
10019 * @returns Strict VBox status code.
10020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10021 * @param iSegReg The index of the segment register to use for
10022 * this access. The base and limits are checked.
10023 * @param GCPtrMem The address of the guest memory.
10024 * @param u128Value The value to store.
10025 */
10026IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10027{
10028 /* The lazy approach for now... */
10029 PRTUINT128U pu128Dst;
10030 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10031 if (rc == VINF_SUCCESS)
10032 {
10033 pu128Dst->au64[0] = u128Value.au64[0];
10034 pu128Dst->au64[1] = u128Value.au64[1];
10035 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10036 }
10037 return rc;
10038}
10039
10040
10041#ifdef IEM_WITH_SETJMP
10042/**
10043 * Stores a data dqword, longjmp on error.
10044 *
10045 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10046 * @param iSegReg The index of the segment register to use for
10047 * this access. The base and limits are checked.
10048 * @param GCPtrMem The address of the guest memory.
10049 * @param u128Value The value to store.
10050 */
10051IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10052{
10053 /* The lazy approach for now... */
10054 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10055 pu128Dst->au64[0] = u128Value.au64[0];
10056 pu128Dst->au64[1] = u128Value.au64[1];
10057 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10058}
10059#endif
10060
10061
10062/**
10063 * Stores a data dqword, SSE aligned.
10064 *
10065 * @returns Strict VBox status code.
10066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10067 * @param iSegReg The index of the segment register to use for
10068 * this access. The base and limits are checked.
10069 * @param GCPtrMem The address of the guest memory.
10070 * @param u128Value The value to store.
10071 */
10072IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10073{
10074 /* The lazy approach for now... */
10075 if ( (GCPtrMem & 15)
10076 && !(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10077 return iemRaiseGeneralProtectionFault0(pVCpu);
10078
10079 PRTUINT128U pu128Dst;
10080 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10081 if (rc == VINF_SUCCESS)
10082 {
10083 pu128Dst->au64[0] = u128Value.au64[0];
10084 pu128Dst->au64[1] = u128Value.au64[1];
10085 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10086 }
10087 return rc;
10088}
10089
10090
10091#ifdef IEM_WITH_SETJMP
10092/**
10093 * Stores a data dqword, SSE aligned, longjmp on error.
10094 *
10096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10097 * @param iSegReg The index of the segment register to use for
10098 * this access. The base and limits are checked.
10099 * @param GCPtrMem The address of the guest memory.
10100 * @param u128Value The value to store.
10101 */
10102DECL_NO_INLINE(IEM_STATIC, void)
10103iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10104{
10105 /* The lazy approach for now... */
10106 if ( (GCPtrMem & 15) == 0
10107 || (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10108 {
10109 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10110 pu128Dst->au64[0] = u128Value.au64[0];
10111 pu128Dst->au64[1] = u128Value.au64[1];
10112 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10113 return;
10114 }
10115
10116 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10117 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10118}
10119#endif
10120
10121
10122/**
10123 * Stores a data oword (octo word).
10124 *
10125 * @returns Strict VBox status code.
10126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10127 * @param iSegReg The index of the segment register to use for
10128 * this access. The base and limits are checked.
10129 * @param GCPtrMem The address of the guest memory.
10130 * @param pu256Value Pointer to the value to store.
10131 */
10132IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10133{
10134 /* The lazy approach for now... */
10135 PRTUINT256U pu256Dst;
10136 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10137 if (rc == VINF_SUCCESS)
10138 {
10139 pu256Dst->au64[0] = pu256Value->au64[0];
10140 pu256Dst->au64[1] = pu256Value->au64[1];
10141 pu256Dst->au64[2] = pu256Value->au64[2];
10142 pu256Dst->au64[3] = pu256Value->au64[3];
10143 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10144 }
10145 return rc;
10146}
10147
10148
10149#ifdef IEM_WITH_SETJMP
10150/**
10151 * Stores a data oword (octo word), longjmp on error.
10152 *
10153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10154 * @param iSegReg The index of the segment register to use for
10155 * this access. The base and limits are checked.
10156 * @param GCPtrMem The address of the guest memory.
10157 * @param pu256Value Pointer to the value to store.
10158 */
10159IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10160{
10161 /* The lazy approach for now... */
10162 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10163 pu256Dst->au64[0] = pu256Value->au64[0];
10164 pu256Dst->au64[1] = pu256Value->au64[1];
10165 pu256Dst->au64[2] = pu256Value->au64[2];
10166 pu256Dst->au64[3] = pu256Value->au64[3];
10167 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10168}
10169#endif
10170
10171
10172/**
10173 * Stores a data oword (octo word), AVX aligned.
10174 *
10175 * @returns Strict VBox status code.
10176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10177 * @param iSegReg The index of the segment register to use for
10178 * this access. The base and limits are checked.
10179 * @param GCPtrMem The address of the guest memory.
10180 * @param pu256Value Pointer to the value to store.
10181 */
10182IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10183{
10184 /* The lazy approach for now... */
10185 if (GCPtrMem & 31)
10186 return iemRaiseGeneralProtectionFault0(pVCpu);
10187
10188 PRTUINT256U pu256Dst;
10189 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10190 if (rc == VINF_SUCCESS)
10191 {
10192 pu256Dst->au64[0] = pu256Value->au64[0];
10193 pu256Dst->au64[1] = pu256Value->au64[1];
10194 pu256Dst->au64[2] = pu256Value->au64[2];
10195 pu256Dst->au64[3] = pu256Value->au64[3];
10196 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10197 }
10198 return rc;
10199}
10200
10201
10202#ifdef IEM_WITH_SETJMP
10203/**
10204 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10205 *
10207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10208 * @param iSegReg The index of the segment register to use for
10209 * this access. The base and limits are checked.
10210 * @param GCPtrMem The address of the guest memory.
10211 * @param pu256Value Pointer to the value to store.
10212 */
10213DECL_NO_INLINE(IEM_STATIC, void)
10214iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10215{
10216 /* The lazy approach for now... */
10217 if ((GCPtrMem & 31) == 0)
10218 {
10219 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10220 pu256Dst->au64[0] = pu256Value->au64[0];
10221 pu256Dst->au64[1] = pu256Value->au64[1];
10222 pu256Dst->au64[2] = pu256Value->au64[2];
10223 pu256Dst->au64[3] = pu256Value->au64[3];
10224 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10225 return;
10226 }
10227
10228 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10229 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10230}
10231#endif
10232
10233
10234/**
10235 * Stores a descriptor register (sgdt, sidt).
10236 *
10237 * @returns Strict VBox status code.
10238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10239 * @param cbLimit The limit.
10240 * @param GCPtrBase The base address.
10241 * @param iSegReg The index of the segment register to use for
10242 * this access. The base and limits are checked.
10243 * @param GCPtrMem The address of the guest memory.
10244 */
10245IEM_STATIC VBOXSTRICTRC
10246iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10247{
10248 /*
10249     * The SIDT and SGDT instructions actually store the data using two
10250     * independent writes.  The instructions do not respond to opsize prefixes.
10251 */
10252 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10253 if (rcStrict == VINF_SUCCESS)
10254 {
10255 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10256 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10257 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10258 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10259 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10260 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10261 else
10262 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10263 }
10264 return rcStrict;
10265}
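
/*
 * Sketch (example only, not compiled): the base value the function above
 * stores at offset 2 in its three cases.  When executing 16-bit code on a
 * 286-or-older target CPU the undefined top base byte is stored as 0xFF,
 * 386+ 16/32-bit code stores the 32-bit base as-is, and 64-bit mode stores
 * the full 64-bit base.  The helper restates the two 32-bit cases only.
 */
#if 0
static uint32_t iemExampleSgdtBaseDword(uint32_t u32Base, bool fPre386TargetCpu)
{
    return fPre386TargetCpu ? (u32Base | UINT32_C(0xff000000)) : u32Base;
}
#endif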
10266
10267
10268/**
10269 * Pushes a word onto the stack.
10270 *
10271 * @returns Strict VBox status code.
10272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10273 * @param u16Value The value to push.
10274 */
10275IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value)
10276{
10277    /* Decrement the stack pointer. */
10278 uint64_t uNewRsp;
10279 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10280
10281 /* Write the word the lazy way. */
10282 uint16_t *pu16Dst;
10283 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10284 if (rc == VINF_SUCCESS)
10285 {
10286 *pu16Dst = u16Value;
10287 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10288 }
10289
10290    /* Commit the new RSP value unless an access handler made trouble. */
10291 if (rc == VINF_SUCCESS)
10292 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10293
10294 return rc;
10295}
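
/*
 * Sketch (example only, not compiled): the stack-pointer arithmetic behind the
 * push helpers.  A push makes room by lowering the stack pointer by the
 * operand size and stores at the new top; the helpers above fetch that new top
 * from iemRegGetRspForPush and only commit it to CPUMCTX::rsp once the store
 * has succeeded.  The flat 64-bit restatement below ignores SS.BASE and the
 * 16/32-bit SP wrapping the real helper has to deal with.
 */
#if 0
static uint64_t iemExamplePushNewRsp(uint64_t uOldRsp, uint8_t cbItem, uint64_t *puWriteAddr)
{
    uint64_t const uNewRsp = uOldRsp - cbItem;  /* make room below the current top     */
    *puWriteAddr = uNewRsp;                     /* the value is written at the new top */
    return uNewRsp;                             /* committed to RSP only on success    */
}
#endif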
10296
10297
10298/**
10299 * Pushes a dword onto the stack.
10300 *
10301 * @returns Strict VBox status code.
10302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10303 * @param u32Value The value to push.
10304 */
10305IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value)
10306{
10307    /* Decrement the stack pointer. */
10308 uint64_t uNewRsp;
10309 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10310
10311 /* Write the dword the lazy way. */
10312 uint32_t *pu32Dst;
10313 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10314 if (rc == VINF_SUCCESS)
10315 {
10316 *pu32Dst = u32Value;
10317 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10318 }
10319
10320    /* Commit the new RSP value unless an access handler made trouble. */
10321 if (rc == VINF_SUCCESS)
10322 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10323
10324 return rc;
10325}
10326
10327
10328/**
10329 * Pushes a dword segment register value onto the stack.
10330 *
10331 * @returns Strict VBox status code.
10332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10333 * @param u32Value The value to push.
10334 */
10335IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value)
10336{
10337    /* Decrement the stack pointer. */
10338 uint64_t uNewRsp;
10339 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10340
10341    /* The Intel docs talk about zero extending the selector register
10342       value.  My actual Intel CPU here might be zero extending the value
10343       but it still only writes the lower word... */
10344    /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
10345     * happens when crossing a page boundary: is the high word checked for
10346     * write accessibility or not? Probably it is.  What about segment limits?
10347     * It appears this behavior is also shared with trap error codes.
10348     *
10349     * Docs indicate the behavior may have changed with the Pentium or Pentium Pro.
10350     * Check ancient hardware to find out when it actually did change. */
10351 uint16_t *pu16Dst;
10352 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10353 if (rc == VINF_SUCCESS)
10354 {
10355 *pu16Dst = (uint16_t)u32Value;
10356 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10357 }
10358
10359    /* Commit the new RSP value unless an access handler made trouble. */
10360 if (rc == VINF_SUCCESS)
10361 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10362
10363 return rc;
10364}
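
/*
 * Sketch (example only, not compiled): what the segment-register push above
 * does with its 4-byte stack slot.  The whole dword slot is mapped read-write
 * (so it is access checked), but only the low word is overwritten; the upper
 * word keeps whatever the stack held before.  Purely illustrative.
 */
#if 0
static void iemExampleSRegPushIntoDwordSlot(uint16_t *pu16Slot /* low word of the dword slot */, uint32_t u32Sel)
{
    *pu16Slot = (uint16_t)u32Sel;   /* bytes 2 and 3 of the slot are left untouched */
}
#endif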
10365
10366
10367/**
10368 * Pushes a qword onto the stack.
10369 *
10370 * @returns Strict VBox status code.
10371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10372 * @param u64Value The value to push.
10373 */
10374IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value)
10375{
10376    /* Decrement the stack pointer. */
10377 uint64_t uNewRsp;
10378 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10379
10380    /* Write the qword the lazy way. */
10381 uint64_t *pu64Dst;
10382 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10383 if (rc == VINF_SUCCESS)
10384 {
10385 *pu64Dst = u64Value;
10386 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10387 }
10388
10389    /* Commit the new RSP value unless an access handler made trouble. */
10390 if (rc == VINF_SUCCESS)
10391 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10392
10393 return rc;
10394}
10395
10396
10397/**
10398 * Pops a word from the stack.
10399 *
10400 * @returns Strict VBox status code.
10401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10402 * @param pu16Value Where to store the popped value.
10403 */
10404IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value)
10405{
10406 /* Increment the stack pointer. */
10407 uint64_t uNewRsp;
10408 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10409
10410    /* Read the word the lazy way. */
10411 uint16_t const *pu16Src;
10412 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10413 if (rc == VINF_SUCCESS)
10414 {
10415 *pu16Value = *pu16Src;
10416 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10417
10418 /* Commit the new RSP value. */
10419 if (rc == VINF_SUCCESS)
10420 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10421 }
10422
10423 return rc;
10424}
10425
10426
10427/**
10428 * Pops a dword from the stack.
10429 *
10430 * @returns Strict VBox status code.
10431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10432 * @param pu32Value Where to store the popped value.
10433 */
10434IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value)
10435{
10436 /* Increment the stack pointer. */
10437 uint64_t uNewRsp;
10438 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10439
10440    /* Read the dword the lazy way. */
10441 uint32_t const *pu32Src;
10442 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10443 if (rc == VINF_SUCCESS)
10444 {
10445 *pu32Value = *pu32Src;
10446 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10447
10448 /* Commit the new RSP value. */
10449 if (rc == VINF_SUCCESS)
10450 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10451 }
10452
10453 return rc;
10454}
10455
10456
10457/**
10458 * Pops a qword from the stack.
10459 *
10460 * @returns Strict VBox status code.
10461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10462 * @param pu64Value Where to store the popped value.
10463 */
10464IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value)
10465{
10466 /* Increment the stack pointer. */
10467 uint64_t uNewRsp;
10468 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10469
10470    /* Read the qword the lazy way. */
10471 uint64_t const *pu64Src;
10472 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10473 if (rc == VINF_SUCCESS)
10474 {
10475 *pu64Value = *pu64Src;
10476 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10477
10478 /* Commit the new RSP value. */
10479 if (rc == VINF_SUCCESS)
10480 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10481 }
10482
10483 return rc;
10484}
10485
10486
10487/**
10488 * Pushes a word onto the stack, using a temporary stack pointer.
10489 *
10490 * @returns Strict VBox status code.
10491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10492 * @param u16Value The value to push.
10493 * @param pTmpRsp Pointer to the temporary stack pointer.
10494 */
10495IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10496{
10497    /* Decrement the stack pointer. */
10498 RTUINT64U NewRsp = *pTmpRsp;
10499 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10500
10501 /* Write the word the lazy way. */
10502 uint16_t *pu16Dst;
10503 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10504 if (rc == VINF_SUCCESS)
10505 {
10506 *pu16Dst = u16Value;
10507 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10508 }
10509
10510    /* Commit the new RSP value unless an access handler made trouble. */
10511 if (rc == VINF_SUCCESS)
10512 *pTmpRsp = NewRsp;
10513
10514 return rc;
10515}
10516
10517
10518/**
10519 * Pushes a dword onto the stack, using a temporary stack pointer.
10520 *
10521 * @returns Strict VBox status code.
10522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10523 * @param u32Value The value to push.
10524 * @param pTmpRsp Pointer to the temporary stack pointer.
10525 */
10526IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10527{
10528    /* Decrement the stack pointer. */
10529 RTUINT64U NewRsp = *pTmpRsp;
10530 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10531
10532    /* Write the dword the lazy way. */
10533 uint32_t *pu32Dst;
10534 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10535 if (rc == VINF_SUCCESS)
10536 {
10537 *pu32Dst = u32Value;
10538 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10539 }
10540
10541    /* Commit the new RSP value unless an access handler made trouble. */
10542 if (rc == VINF_SUCCESS)
10543 *pTmpRsp = NewRsp;
10544
10545 return rc;
10546}
10547
10548
10549/**
10550 * Pushes a qword onto the stack, using a temporary stack pointer.
10551 *
10552 * @returns Strict VBox status code.
10553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10554 * @param u64Value The value to push.
10555 * @param pTmpRsp Pointer to the temporary stack pointer.
10556 */
10557IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10558{
10559    /* Decrement the stack pointer. */
10560 RTUINT64U NewRsp = *pTmpRsp;
10561 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10562
10563    /* Write the qword the lazy way. */
10564 uint64_t *pu64Dst;
10565 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10566 if (rc == VINF_SUCCESS)
10567 {
10568 *pu64Dst = u64Value;
10569 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10570 }
10571
10572    /* Commit the new RSP value unless an access handler made trouble. */
10573 if (rc == VINF_SUCCESS)
10574 *pTmpRsp = NewRsp;
10575
10576 return rc;
10577}
10578
10579
10580/**
10581 * Pops a word from the stack, using a temporary stack pointer.
10582 *
10583 * @returns Strict VBox status code.
10584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10585 * @param pu16Value Where to store the popped value.
10586 * @param pTmpRsp Pointer to the temporary stack pointer.
10587 */
10588IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10589{
10590 /* Increment the stack pointer. */
10591 RTUINT64U NewRsp = *pTmpRsp;
10592 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10593
10594    /* Read the word the lazy way. */
10595 uint16_t const *pu16Src;
10596 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10597 if (rc == VINF_SUCCESS)
10598 {
10599 *pu16Value = *pu16Src;
10600 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10601
10602 /* Commit the new RSP value. */
10603 if (rc == VINF_SUCCESS)
10604 *pTmpRsp = NewRsp;
10605 }
10606
10607 return rc;
10608}
10609
10610
10611/**
10612 * Pops a dword from the stack, using a temporary stack pointer.
10613 *
10614 * @returns Strict VBox status code.
10615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10616 * @param pu32Value Where to store the popped value.
10617 * @param pTmpRsp Pointer to the temporary stack pointer.
10618 */
10619IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10620{
10621 /* Increment the stack pointer. */
10622 RTUINT64U NewRsp = *pTmpRsp;
10623 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10624
10625    /* Read the dword the lazy way. */
10626 uint32_t const *pu32Src;
10627 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10628 if (rc == VINF_SUCCESS)
10629 {
10630 *pu32Value = *pu32Src;
10631 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10632
10633 /* Commit the new RSP value. */
10634 if (rc == VINF_SUCCESS)
10635 *pTmpRsp = NewRsp;
10636 }
10637
10638 return rc;
10639}
10640
10641
10642/**
10643 * Pops a qword from the stack, using a temporary stack pointer.
10644 *
10645 * @returns Strict VBox status code.
10646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10647 * @param pu64Value Where to store the popped value.
10648 * @param pTmpRsp Pointer to the temporary stack pointer.
10649 */
10650IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10651{
10652 /* Increment the stack pointer. */
10653 RTUINT64U NewRsp = *pTmpRsp;
10654 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10655
10656    /* Read the qword the lazy way. */
10657 uint64_t const *pu64Src;
10658 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10659 if (rcStrict == VINF_SUCCESS)
10660 {
10661 *pu64Value = *pu64Src;
10662 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10663
10664 /* Commit the new RSP value. */
10665 if (rcStrict == VINF_SUCCESS)
10666 *pTmpRsp = NewRsp;
10667 }
10668
10669 return rcStrict;
10670}
10671
10672
10673/**
10674 * Begin a special stack push (used by interrupts, exceptions and such).
10675 *
10676 * This will raise \#SS or \#PF if appropriate.
10677 *
10678 * @returns Strict VBox status code.
10679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10680 * @param cbMem The number of bytes to push onto the stack.
10681 * @param ppvMem Where to return the pointer to the stack memory.
10682 * As with the other memory functions this could be
10683 * direct access or bounce buffered access, so
10684 * don't commit register until the commit call
10685 *                      don't commit register state until the commit call
10686 * @param puNewRsp Where to return the new RSP value. This must be
10687 * passed unchanged to
10688 * iemMemStackPushCommitSpecial().
10689 */
10690IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10691{
10692 Assert(cbMem < UINT8_MAX);
10693 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10694 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10695}
10696
10697
10698/**
10699 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10700 *
10701 * This will update the rSP.
10702 *
10703 * @returns Strict VBox status code.
10704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10705 * @param pvMem The pointer returned by
10706 * iemMemStackPushBeginSpecial().
10707 * @param uNewRsp The new RSP value returned by
10708 * iemMemStackPushBeginSpecial().
10709 */
10710IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp)
10711{
10712 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10713 if (rcStrict == VINF_SUCCESS)
10714 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10715 return rcStrict;
10716}
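
/*
 * Illustrative usage sketch (hypothetical caller, not copied from the real
 * exception code): a special push is a two-phase operation so that nothing
 * is committed if mapping or filling the stack frame fails. The frame
 * contents (uEip, uCs, fEfl) below are made up for the example.
 *
 * @code
 *      uint64_t  uNewRsp;
 *      uint32_t *pu32Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, sizeof(*pu32Frame) * 3,
 *                                                          (void **)&pu32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pu32Frame[0] = uEip;        // hypothetical frame contents
 *      pu32Frame[1] = uCs;
 *      pu32Frame[2] = fEfl;
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu32Frame, uNewRsp); // updates RSP on success
 * @endcode
 */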
10717
10718
10719/**
10720 * Begin a special stack pop (used by iret, retf and such).
10721 *
10722 * This will raise \#SS or \#PF if appropriate.
10723 *
10724 * @returns Strict VBox status code.
10725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10726 * @param cbMem The number of bytes to pop from the stack.
10727 * @param ppvMem Where to return the pointer to the stack memory.
10728 * @param puNewRsp Where to return the new RSP value. This must be
10729 * assigned to CPUMCTX::rsp manually some time
10730 * after iemMemStackPopDoneSpecial() has been
10731 * called.
10732 */
10733IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10734{
10735 Assert(cbMem < UINT8_MAX);
10736 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10737 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10738}
10739
10740
10741/**
10742 * Continue a special stack pop (used by iret and retf).
10743 *
10744 * This will raise \#SS or \#PF if appropriate.
10745 *
10746 * @returns Strict VBox status code.
10747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10748 * @param cbMem The number of bytes to pop from the stack.
10749 * @param ppvMem Where to return the pointer to the stack memory.
10750 * @param puNewRsp Where to return the new RSP value. This must be
10751 * assigned to CPUMCTX::rsp manually some time
10752 * after iemMemStackPopDoneSpecial() has been
10753 * called.
10754 */
10755IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10756{
10757 Assert(cbMem < UINT8_MAX);
10758 RTUINT64U NewRsp;
10759 NewRsp.u = *puNewRsp;
10760 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10761 *puNewRsp = NewRsp.u;
10762 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10763}
10764
10765
10766/**
10767 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10768 * iemMemStackPopContinueSpecial).
10769 *
10770 * The caller will manually commit the rSP.
10771 *
10772 * @returns Strict VBox status code.
10773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10774 * @param pvMem The pointer returned by
10775 * iemMemStackPopBeginSpecial() or
10776 * iemMemStackPopContinueSpecial().
10777 */
10778IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem)
10779{
10780 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10781}
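
/*
 * Illustrative usage sketch (hypothetical caller): the special pop sequence
 * maps the stack memory, lets the caller read the frame, unmaps it again via
 * iemMemStackPopDoneSpecial, and leaves the RSP commit entirely to the
 * caller, e.g. once all protection checks have passed. The frame layout
 * below is made up for the example.
 *
 * @code
 *      uint64_t        uNewRsp;
 *      uint32_t const *pu32Frame;
 *      VBOXSTRICTRC    rcStrict = iemMemStackPopBeginSpecial(pVCpu, sizeof(uint32_t) * 3,
 *                                                            (void const **)&pu32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      uint32_t const uEip = pu32Frame[0];     // hypothetical frame layout
 *      uint32_t const uCs  = pu32Frame[1];
 *      uint32_t const fEfl = pu32Frame[2];
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu32Frame);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = uNewRsp;   // manual commit, as noted above
 * @endcode
 */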
10782
10783
10784/**
10785 * Fetches a system table byte.
10786 *
10787 * @returns Strict VBox status code.
10788 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10789 * @param pbDst Where to return the byte.
10790 * @param iSegReg The index of the segment register to use for
10791 * this access. The base and limits are checked.
10792 * @param GCPtrMem The address of the guest memory.
10793 */
10794IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10795{
10796 /* The lazy approach for now... */
10797 uint8_t const *pbSrc;
10798 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10799 if (rc == VINF_SUCCESS)
10800 {
10801 *pbDst = *pbSrc;
10802 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10803 }
10804 return rc;
10805}
10806
10807
10808/**
10809 * Fetches a system table word.
10810 *
10811 * @returns Strict VBox status code.
10812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10813 * @param pu16Dst Where to return the word.
10814 * @param iSegReg The index of the segment register to use for
10815 * this access. The base and limits are checked.
10816 * @param GCPtrMem The address of the guest memory.
10817 */
10818IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10819{
10820 /* The lazy approach for now... */
10821 uint16_t const *pu16Src;
10822 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10823 if (rc == VINF_SUCCESS)
10824 {
10825 *pu16Dst = *pu16Src;
10826 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10827 }
10828 return rc;
10829}
10830
10831
10832/**
10833 * Fetches a system table dword.
10834 *
10835 * @returns Strict VBox status code.
10836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10837 * @param pu32Dst Where to return the dword.
10838 * @param iSegReg The index of the segment register to use for
10839 * this access. The base and limits are checked.
10840 * @param GCPtrMem The address of the guest memory.
10841 */
10842IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10843{
10844 /* The lazy approach for now... */
10845 uint32_t const *pu32Src;
10846 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10847 if (rc == VINF_SUCCESS)
10848 {
10849 *pu32Dst = *pu32Src;
10850 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10851 }
10852 return rc;
10853}
10854
10855
10856/**
10857 * Fetches a system table qword.
10858 *
10859 * @returns Strict VBox status code.
10860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10861 * @param pu64Dst Where to return the qword.
10862 * @param iSegReg The index of the segment register to use for
10863 * this access. The base and limits are checked.
10864 * @param GCPtrMem The address of the guest memory.
10865 */
10866IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10867{
10868 /* The lazy approach for now... */
10869 uint64_t const *pu64Src;
10870 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10871 if (rc == VINF_SUCCESS)
10872 {
10873 *pu64Dst = *pu64Src;
10874 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10875 }
10876 return rc;
10877}
10878
10879
10880/**
 * Fetches a descriptor table entry with a caller-specified error code.
10882 *
10883 * @returns Strict VBox status code.
10884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10885 * @param pDesc Where to return the descriptor table entry.
 * @param   uSel                The selector whose table entry to fetch.
10887 * @param uXcpt The exception to raise on table lookup error.
10888 * @param uErrorCode The error code associated with the exception.
10889 */
10890IEM_STATIC VBOXSTRICTRC
10891iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10892{
10893 AssertPtr(pDesc);
10894 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10895
10896 /** @todo did the 286 require all 8 bytes to be accessible? */
10897 /*
10898 * Get the selector table base and check bounds.
10899 */
10900 RTGCPTR GCPtrBase;
10901 if (uSel & X86_SEL_LDT)
10902 {
10903 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10904 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10905 {
10906 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10907 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10908 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10909 uErrorCode, 0);
10910 }
10911
10912 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10913 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10914 }
10915 else
10916 {
10917 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10918 {
10919 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10920 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10921 uErrorCode, 0);
10922 }
10923 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10924 }
10925
10926 /*
10927 * Read the legacy descriptor and maybe the long mode extensions if
10928 * required.
10929 */
10930 VBOXSTRICTRC rcStrict;
10931 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10932 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10933 else
10934 {
10935 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10936 if (rcStrict == VINF_SUCCESS)
10937 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10938 if (rcStrict == VINF_SUCCESS)
10939 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10940 if (rcStrict == VINF_SUCCESS)
10941 pDesc->Legacy.au16[3] = 0;
10942 else
10943 return rcStrict;
10944 }
10945
10946 if (rcStrict == VINF_SUCCESS)
10947 {
10948 if ( !IEM_IS_LONG_MODE(pVCpu)
10949 || pDesc->Legacy.Gen.u1DescType)
10950 pDesc->Long.au64[1] = 0;
10951 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10952 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10953 else
10954 {
10955 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10956 /** @todo is this the right exception? */
10957 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10958 }
10959 }
10960 return rcStrict;
10961}
10962
10963
10964/**
10965 * Fetches a descriptor table entry.
10966 *
10967 * @returns Strict VBox status code.
10968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10969 * @param pDesc Where to return the descriptor table entry.
 * @param   uSel                The selector whose table entry to fetch.
10971 * @param uXcpt The exception to raise on table lookup error.
10972 */
10973IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10974{
10975 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10976}
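
/*
 * Illustrative usage sketch (hypothetical and simplified): callers typically
 * fetch the descriptor for a selector, bail out on lookup faults, and then
 * inspect the legacy bits before loading the register. Real callers use
 * dedicated raise helpers and fuller checks; the error code choice below is
 * only indicative.
 *
 * @code
 *      IEMSELDESC Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;                    // #GP or #PF was already raised
 *      if (!Desc.Legacy.Gen.u1Present)         // sketch of a not-present check
 *          return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP,
 *                                   IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
 *                                   uSel & X86_SEL_MASK_OFF_RPL, 0);
 *      // ... DPL/type checks, then load the hidden register parts ...
 * @endcode
 */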
10977
10978
10979/**
 * Fakes a long mode stack segment descriptor for SS = 0.
10981 *
10982 * @param pDescSs Where to return the fake stack descriptor.
10983 * @param uDpl The DPL we want.
10984 */
10985IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10986{
10987 pDescSs->Long.au64[0] = 0;
10988 pDescSs->Long.au64[1] = 0;
10989 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10990 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10991 pDescSs->Long.Gen.u2Dpl = uDpl;
10992 pDescSs->Long.Gen.u1Present = 1;
10993 pDescSs->Long.Gen.u1Long = 1;
10994}
10995
10996
10997/**
10998 * Marks the selector descriptor as accessed (only non-system descriptors).
10999 *
 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11001 * will therefore skip the limit checks.
11002 *
11003 * @returns Strict VBox status code.
11004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11005 * @param uSel The selector.
11006 */
11007IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel)
11008{
11009 /*
11010 * Get the selector table base and calculate the entry address.
11011 */
11012 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11013 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11014 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11015 GCPtr += uSel & X86_SEL_MASK;
11016
11017 /*
11018 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11019 * ugly stuff to avoid this. This will make sure it's an atomic access
 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11021 */
11022 VBOXSTRICTRC rcStrict;
11023 uint32_t volatile *pu32;
11024 if ((GCPtr & 3) == 0)
11025 {
        /* The normal case: map the 32 bits around the accessed bit (bit 40). */
11027 GCPtr += 2 + 2;
11028 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11029 if (rcStrict != VINF_SUCCESS)
11030 return rcStrict;
        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11032 }
11033 else
11034 {
11035 /* The misaligned GDT/LDT case, map the whole thing. */
11036 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11037 if (rcStrict != VINF_SUCCESS)
11038 return rcStrict;
11039 switch ((uintptr_t)pu32 & 3)
11040 {
11041 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11042 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11043 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11044 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11045 }
11046 }
11047
11048 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11049}
11050
11051/** @} */
11052
11053
11054/*
11055 * Include the C/C++ implementation of instruction.
11056 */
11057#include "IEMAllCImpl.cpp.h"
11058
11059
11060
11061/** @name "Microcode" macros.
11062 *
 * The idea is that we should be able to use the same code both to interpret
 * instructions and to feed a future recompiler, hence this obfuscation (see
 * the illustrative sketch right below this block).
11065 *
11066 * @{
11067 */
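
/*
 * Illustrative sketch (not an actual opcode worker from this file): the
 * instruction decoders emit bodies written purely in IEM_MC_* statements, so
 * the same text can later be retargeted at a recompiler. A hypothetical
 * "mov dx, ax" style worker would look roughly like this:
 *
 * @code
 *      IEM_MC_BEGIN(0, 1);                             // 0 args, 1 local
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xAX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xDX, u16Value);
 *      IEM_MC_ADVANCE_RIP();                           // step RIP and clear RF
 *      IEM_MC_END();
 * @endcode
 */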
11068#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11069#define IEM_MC_END() }
11070#define IEM_MC_PAUSE() do {} while (0)
11071#define IEM_MC_CONTINUE() do {} while (0)
11072
11073/** Internal macro. */
11074#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11075 do \
11076 { \
11077 VBOXSTRICTRC rcStrict2 = a_Expr; \
11078 if (rcStrict2 != VINF_SUCCESS) \
11079 return rcStrict2; \
11080 } while (0)
11081
11082
11083#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11084#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11085#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11086#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11087#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11088#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11089#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11090#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11091#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11092 do { \
11093 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11094 return iemRaiseDeviceNotAvailable(pVCpu); \
11095 } while (0)
11096#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11097 do { \
11098 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11099 return iemRaiseDeviceNotAvailable(pVCpu); \
11100 } while (0)
11101#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11102 do { \
11103 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW & X86_FSW_ES) \
11104 return iemRaiseMathFault(pVCpu); \
11105 } while (0)
11106#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11107 do { \
11108 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11109 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11110 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11111 return iemRaiseUndefinedOpcode(pVCpu); \
11112 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11113 return iemRaiseDeviceNotAvailable(pVCpu); \
11114 } while (0)
11115#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11116 do { \
11117 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11118 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11119 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11120 return iemRaiseUndefinedOpcode(pVCpu); \
11121 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11122 return iemRaiseDeviceNotAvailable(pVCpu); \
11123 } while (0)
11124#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11125 do { \
11126 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11127 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11128 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11129 return iemRaiseUndefinedOpcode(pVCpu); \
11130 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11131 return iemRaiseDeviceNotAvailable(pVCpu); \
11132 } while (0)
11133#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11134 do { \
11135 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11136 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11137 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11138 return iemRaiseUndefinedOpcode(pVCpu); \
11139 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11140 return iemRaiseDeviceNotAvailable(pVCpu); \
11141 } while (0)
11142#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11143 do { \
11144 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11145 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11146 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11147 return iemRaiseUndefinedOpcode(pVCpu); \
11148 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11149 return iemRaiseDeviceNotAvailable(pVCpu); \
11150 } while (0)
11151#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11152 do { \
11153 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11154 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11155 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11156 return iemRaiseUndefinedOpcode(pVCpu); \
11157 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11158 return iemRaiseDeviceNotAvailable(pVCpu); \
11159 } while (0)
11160#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11161 do { \
11162 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11163 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11164 return iemRaiseUndefinedOpcode(pVCpu); \
11165 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11166 return iemRaiseDeviceNotAvailable(pVCpu); \
11167 } while (0)
11168#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11169 do { \
11170 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11171 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11172 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11173 return iemRaiseUndefinedOpcode(pVCpu); \
11174 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11175 return iemRaiseDeviceNotAvailable(pVCpu); \
11176 } while (0)
11177#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11178 do { \
11179 if (pVCpu->iem.s.uCpl != 0) \
11180 return iemRaiseGeneralProtectionFault0(pVCpu); \
11181 } while (0)
11182#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11183 do { \
11184 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11185 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11186 } while (0)
11187#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11188 do { \
11189 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11190 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11191 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11192 return iemRaiseUndefinedOpcode(pVCpu); \
11193 } while (0)
11194#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11195 do { \
11196 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11197 return iemRaiseGeneralProtectionFault0(pVCpu); \
11198 } while (0)
11199
11200
11201#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11202#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11203#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11204#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11205#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11206#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11207#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11208 uint32_t a_Name; \
11209 uint32_t *a_pName = &a_Name
11210#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11211 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11212
11213#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11214#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11215
11216#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11217#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11218#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11219#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11220#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11221#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11222#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11223#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11224#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11225#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11226#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11227#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11228#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11229#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11230#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11231#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11232#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11233#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11234 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11235 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11236 } while (0)
11237#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11238 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11239 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11240 } while (0)
11241#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11242 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11243 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11244 } while (0)
11245/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11246#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11247 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11248 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11249 } while (0)
11250#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11251 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11252 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11253 } while (0)
11254/** @note Not for IOPL or IF testing or modification. */
11255#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11256#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11257#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW
11258#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW
11259
11260#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11261#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11262#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11263#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11264#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11265#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11266#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11267#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11268#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11269#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11270/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11271#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11272 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11273 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11274 } while (0)
11275#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11276 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11277 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11278 } while (0)
11279#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11280 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11281
11282
11283#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11284#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11285/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11286 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11287#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11288#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11289/** @note Not for IOPL or IF testing or modification. */
11290#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11291
11292#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11293#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11294#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11295 do { \
11296 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11297 *pu32Reg += (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11299 } while (0)
11300#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11301
11302#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11303#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11304#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11305 do { \
11306 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11307 *pu32Reg -= (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11309 } while (0)
11310#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11311#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11312
11313#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11314#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11315#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11316#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11317#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11318#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11319#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11320
11321#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11322#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11323#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11324#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11325
11326#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11327#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11328#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11329
11330#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11331#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11332#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11333
11334#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11335#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11336#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11337
11338#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11339#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11340#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11341
11342#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11343
11344#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11345
11346#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11347#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11348#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11349 do { \
11350 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11351 *pu32Reg &= (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11353 } while (0)
11354#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11355
11356#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11357#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11358#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11359 do { \
11360 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11361 *pu32Reg |= (a_u32Value); \
        pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11363 } while (0)
11364#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11365
11366
11367/** @note Not for IOPL or IF modification. */
11368#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11369/** @note Not for IOPL or IF modification. */
11370#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11371/** @note Not for IOPL or IF modification. */
11372#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11373
11374#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11375
/** Switches the FPU state to MMX mode (FSW.TOP=0, all register tags valid, i.e. abridged FTW=0xff). */
11377#define IEM_MC_FPU_TO_MMX_MODE() do { \
11378 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FSW &= ~X86_FSW_TOP_MASK; \
11379 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0xff; \
11380 } while (0)
11381
/** Switches the FPU state out of MMX mode (all register tags empty, i.e. abridged FTW=0). */
11383#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11384 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FTW = 0; \
11385 } while (0)
11386
11387#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11388 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx; } while (0)
11389#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11390 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11391#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11392 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11393 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11394 } while (0)
11395#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11396 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11397 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11398 } while (0)
11399#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11400 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11401#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11402 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11403#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11404 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aRegs[(a_iMReg)].mmx)
11405
11406#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11407 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; \
11408 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; \
11409 } while (0)
11410#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11411 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11412#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11413 do { (a_u32Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11414#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11415 do { (a_u64Value) = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11416#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11417 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11418 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11419 } while (0)
11420#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11421 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11422#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11423 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11424 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11425 } while (0)
11426#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11427 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11428#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11429 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11430 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = 0; \
11431 } while (0)
11432#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11433 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11434#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11435 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11436#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11437 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].uXmm)
11438#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11439 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXReg)].au64[0])
11440#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11441 do { pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[0] \
11442 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[0]; \
11443 pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegDst)].au64[1] \
11444 = pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aXMM[(a_iXRegSrc)].au64[1]; \
11445 } while (0)
11446
11447#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11448 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11449 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11450 (a_u32Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au32[0]; \
11451 } while (0)
11452#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11453 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11454 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11455 (a_u64Dst) = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11456 } while (0)
11457#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11458 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11459 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11460 (a_u128Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11461 (a_u128Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11462 } while (0)
11463#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11464 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11465 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11466 (a_u256Dst).au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11467 (a_u256Dst).au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11468 (a_u256Dst).au64[2] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11469 (a_u256Dst).au64[3] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11470 } while (0)
11471
11472#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_pXState, a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11473#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11474 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11475 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11476 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11477 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11478 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11479 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11480 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11481 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11482 } while (0)
11483#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11484 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11485 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11486 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11487 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11488 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11489 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11490 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11491 } while (0)
11492#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11493 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11494 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11495 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11496 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11497 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11498 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11499 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11500 } while (0)
11501#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11502 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11503 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11504 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11505 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11506 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11507 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11508 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11509 } while (0)
11510
11511#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11512 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11513#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11514 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].uXmm)
11515#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11516 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.aYMM[(a_iYReg)].au64[0])
11517#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11518 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11519 uintptr_t const iYRegTmp = (a_iYReg); \
11520 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11521 pXStateTmp->u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11522 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegTmp); \
11523 } while (0)
11524
11525#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11526 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11527 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11528 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11529 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11530 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11531 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11532 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pXStateTmp->u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11533 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11534 } while (0)
11535#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11536 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11537 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11538 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11539 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11540 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[1]; \
11541 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11542 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11543 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11544 } while (0)
11545#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11546 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11547 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11548 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11549 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrcTmp].au64[0]; \
11550 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11551 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11552 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11553 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11554 } while (0)
11555
11556#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11557 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11558 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11559 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11560 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11561 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[0] = pXStateTmp->x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11562 pXStateTmp->x87.aXMM[iYRegDstTmp].au32[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11563 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11564 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11565 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11566 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11567 } while (0)
11568#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11569 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11570 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11571 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11572 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11573 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11574 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11575 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11576 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11577 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11578 } while (0)
11579#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11580 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11581 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11582 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11583 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11584 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = pXStateTmp->x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11585 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11586 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11587 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11588 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11589 } while (0)
11590#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11591 do { PX86XSAVEAREA pXStateTmp = pVCpu->cpum.GstCtx.CTX_SUFF(pXState); \
11592 uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11593 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11594 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11595 pXStateTmp->x87.aXMM[iYRegDstTmp].au64[1] = pXStateTmp->x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11596 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11597 pXStateTmp->u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11598 IEM_MC_INT_CLEAR_ZMM_256_UP(pXStateTmp, iYRegDstTmp); \
11599 } while (0)
11600
11601#ifndef IEM_WITH_SETJMP
11602# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11603 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11604# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11605 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11606# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11607 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11608#else
11609# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11610 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11611# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11612 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11613# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11614 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11615#endif
11616
11617#ifndef IEM_WITH_SETJMP
11618# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11619 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11620# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11621 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11622# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11623 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11624#else
11625# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11626 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11627# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11628 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11629# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11630 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11631#endif
11632
11633#ifndef IEM_WITH_SETJMP
11634# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11635 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11636# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11637 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11638# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11639 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11640#else
11641# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11642 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11643# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11644 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11645# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11646 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11647#endif
11648
11649#ifdef SOME_UNUSED_FUNCTION
11650# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11651 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11652#endif
11653
11654#ifndef IEM_WITH_SETJMP
11655# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11656 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11657# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11658 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11659# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11660 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11661# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11662 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11663#else
11664# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11665 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11666# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11667 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11668# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11669 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11670# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11671 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11672#endif
11673
11674#ifndef IEM_WITH_SETJMP
11675# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11676 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11677# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11678 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11679# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11680 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11681#else
11682# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11683 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11684# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11685 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11686# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11687 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11688#endif
11689
11690#ifndef IEM_WITH_SETJMP
11691# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11692 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11693# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11694 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11695#else
11696# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11697 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11698# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11699 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11700#endif
11701
11702#ifndef IEM_WITH_SETJMP
11703# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11704 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11705# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11706 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11707#else
11708# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11709 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11710# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11711 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11712#endif
11713
11714
11715
11716#ifndef IEM_WITH_SETJMP
11717# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11718 do { \
11719 uint8_t u8Tmp; \
11720 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11721 (a_u16Dst) = u8Tmp; \
11722 } while (0)
11723# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11724 do { \
11725 uint8_t u8Tmp; \
11726 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11727 (a_u32Dst) = u8Tmp; \
11728 } while (0)
11729# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11730 do { \
11731 uint8_t u8Tmp; \
11732 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11733 (a_u64Dst) = u8Tmp; \
11734 } while (0)
11735# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11736 do { \
11737 uint16_t u16Tmp; \
11738 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11739 (a_u32Dst) = u16Tmp; \
11740 } while (0)
11741# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11742 do { \
11743 uint16_t u16Tmp; \
11744 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11745 (a_u64Dst) = u16Tmp; \
11746 } while (0)
11747# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11748 do { \
11749 uint32_t u32Tmp; \
11750 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11751 (a_u64Dst) = u32Tmp; \
11752 } while (0)
11753#else /* IEM_WITH_SETJMP */
11754# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11755 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11756# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11757 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11758# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11759 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11760# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11761 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11762# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11763 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11764# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11765 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11766#endif /* IEM_WITH_SETJMP */
11767
11768#ifndef IEM_WITH_SETJMP
11769# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11770 do { \
11771 uint8_t u8Tmp; \
11772 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11773 (a_u16Dst) = (int8_t)u8Tmp; \
11774 } while (0)
11775# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11776 do { \
11777 uint8_t u8Tmp; \
11778 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11779 (a_u32Dst) = (int8_t)u8Tmp; \
11780 } while (0)
11781# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11782 do { \
11783 uint8_t u8Tmp; \
11784 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11785 (a_u64Dst) = (int8_t)u8Tmp; \
11786 } while (0)
11787# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11788 do { \
11789 uint16_t u16Tmp; \
11790 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11791 (a_u32Dst) = (int16_t)u16Tmp; \
11792 } while (0)
11793# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11794 do { \
11795 uint16_t u16Tmp; \
11796 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11797 (a_u64Dst) = (int16_t)u16Tmp; \
11798 } while (0)
11799# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11800 do { \
11801 uint32_t u32Tmp; \
11802 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11803 (a_u64Dst) = (int32_t)u32Tmp; \
11804 } while (0)
11805#else /* IEM_WITH_SETJMP */
11806# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11807 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11808# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11809 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11810# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11811 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11812# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11813 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11814# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11815 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11816# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11817 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11818#endif /* IEM_WITH_SETJMP */
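/*
 * Standalone sketch of the extension semantics the ZX/SX fetch macros above
 * rely on: assigning the narrow unsigned value directly zero extends, while
 * first casting it to the signed type of the same width turns the widening
 * into a sign extension.  The helper below is purely illustrative and not
 * used by IEM.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void iemExampleShowExtension(uint8_t u8Fetched)
{
    uint64_t const u64Zx = u8Fetched;           /* like IEM_MC_FETCH_MEM_U8_ZX_U64 */
    uint64_t const u64Sx = (int8_t)u8Fetched;   /* like IEM_MC_FETCH_MEM_U8_SX_U64 */
    printf("zx=%#018llx sx=%#018llx\n", (unsigned long long)u64Zx, (unsigned long long)u64Sx);
}

/* iemExampleShowExtension(0x80) prints zx=0x0000000000000080 sx=0xffffffffffffff80. */
#endif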
11819
11820#ifndef IEM_WITH_SETJMP
11821# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11822 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11823# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11824 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11825# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11826 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11827# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11828 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11829#else
11830# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11831 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11832# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11833 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11834# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11835 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11836# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11837 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11838#endif
11839
11840#ifndef IEM_WITH_SETJMP
11841# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11842 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11843# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11844 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11845# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11846 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11847# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11848 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11849#else
11850# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11851 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11852# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11853 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11854# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11855 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11856# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11857 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11858#endif
11859
11860#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11861#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11862#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11863#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11864#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11865#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11866#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11867 do { \
11868 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11869 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11870 } while (0)
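/*
 * Illustration of the negative QNaN bit patterns stored by the three macros
 * above: sign bit set, all exponent bits set, and the top fraction bit (the
 * quiet bit) set; for R80 the explicit integer bit is set as well.  The
 * check below is a standalone sketch, not used by IEM.
 */
#if 0
#include <stdint.h>
#include <assert.h>

static void iemExampleCheckNegQNanR32(void)
{
    uint32_t const uSign  = UINT32_C(1)    << 31;   /* sign      */
    uint32_t const uExp   = UINT32_C(0xff) << 23;   /* exponent  */
    uint32_t const uQuiet = UINT32_C(1)    << 22;   /* quiet bit */
    assert((uSign | uExp | uQuiet) == UINT32_C(0xffc00000));
}
#endif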
11871
11872#ifndef IEM_WITH_SETJMP
11873# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11874 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11875# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11876 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11877#else
11878# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11879 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11880# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11881 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11882#endif
11883
11884#ifndef IEM_WITH_SETJMP
11885# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11886 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11887# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11888 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11889#else
11890# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11891 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11892# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11893 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11894#endif
11895
11896
11897#define IEM_MC_PUSH_U16(a_u16Value) \
11898 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11899#define IEM_MC_PUSH_U32(a_u32Value) \
11900 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11901#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11902 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11903#define IEM_MC_PUSH_U64(a_u64Value) \
11904 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11905
11906#define IEM_MC_POP_U16(a_pu16Value) \
11907 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11908#define IEM_MC_POP_U32(a_pu32Value) \
11909 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11910#define IEM_MC_POP_U64(a_pu64Value) \
11911 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11912
11913/** Maps guest memory for direct or bounce buffered access.
11914 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11915 * @remarks May return.
11916 */
11917#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11918 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11919
11920/** Maps guest memory for direct or bounce buffered access.
11921 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11922 * @remarks May return.
11923 */
11924#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11925 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11926
11927/** Commits the memory and unmaps the guest memory.
11928 * @remarks May return.
11929 */
11930#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11931 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
11932
11933/** Commits the memory and unmaps the guest memory, unless the FPU status word
11934 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11935 * that would prevent FLD from storing its result.
11936 *
11937 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11938 * store, while \#P will not.
11939 *
11940 * @remarks May in theory return - for now.
11941 */
11942#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11943 do { \
11944 if ( !(a_u16FSW & X86_FSW_ES) \
11945 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11946 & ~(pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_MASK_ALL) ) ) \
11947 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11948 } while (0)
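/*
 * Standalone sketch of the commit predicate used by the
 * IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE macro above: the store is
 * committed unless the FPU status word has the error summary bit set and at
 * least one of the #U, #O or #I conditions is unmasked in the control word.
 * The constants mirror X86_FSW_* / X86_FCW_MASK_ALL and are spelled out here
 * only for illustration.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool iemExampleFpuStoreWouldCommit(uint16_t u16Fsw, uint16_t u16Fcw)
{
    uint16_t const fFswEs   = UINT16_C(0x0080);   /* error summary       */
    uint16_t const fFswIe   = UINT16_C(0x0001);   /* invalid operation   */
    uint16_t const fFswOe   = UINT16_C(0x0008);   /* overflow            */
    uint16_t const fFswUe   = UINT16_C(0x0010);   /* underflow           */
    uint16_t const fFcwMask = UINT16_C(0x003f);   /* all exception masks */

    return !(u16Fsw & fFswEs)
        || !((u16Fsw & (fFswUe | fFswOe | fFswIe)) & ~(u16Fcw & fFcwMask));
}
#endif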
11949
11950/** Calculate effective address from R/M. */
11951#ifndef IEM_WITH_SETJMP
11952# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11953 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11954#else
11955# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11956 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11957#endif
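/*
 * Illustrative decode fragment (hypothetical opcode body) showing the usual
 * pairing of IEM_MC_CALC_RM_EFF_ADDR with a memory access: the ModR/M byte
 * is turned into an effective address which is then used together with the
 * currently effective segment register.
 */
#if 0
IEM_MC_BEGIN(0, 2);
IEM_MC_LOCAL(RTGCPTR,  GCPtrEffSrc);
IEM_MC_LOCAL(uint16_t, u16Value);
IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0 /*cbImm*/);
IEM_MC_FETCH_MEM_U16(u16Value, pVCpu->iem.s.iEffSeg, GCPtrEffSrc);
IEM_MC_STORE_GREG_U16(((bRm >> X86_MODRM_REG_SHIFT) & X86_MODRM_REG_SMASK) | pVCpu->iem.s.uRexReg, u16Value);
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif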
11958
11959#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11960#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11961#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11962#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11963#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11964#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11965#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11966
11967/**
11968 * Defers the rest of the instruction emulation to a C implementation routine
11969 * and returns, only taking the standard parameters.
11970 *
11971 * @param a_pfnCImpl The pointer to the C routine.
11972 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11973 */
11974#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11975
11976/**
11977 * Defers the rest of instruction emulation to a C implementation routine and
11978 * returns, taking one argument in addition to the standard ones.
11979 *
11980 * @param a_pfnCImpl The pointer to the C routine.
11981 * @param a0 The argument.
11982 */
11983#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11984
11985/**
11986 * Defers the rest of the instruction emulation to a C implementation routine
11987 * and returns, taking two arguments in addition to the standard ones.
11988 *
11989 * @param a_pfnCImpl The pointer to the C routine.
11990 * @param a0 The first extra argument.
11991 * @param a1 The second extra argument.
11992 */
11993#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11994
11995/**
11996 * Defers the rest of the instruction emulation to a C implementation routine
11997 * and returns, taking three arguments in addition to the standard ones.
11998 *
11999 * @param a_pfnCImpl The pointer to the C routine.
12000 * @param a0 The first extra argument.
12001 * @param a1 The second extra argument.
12002 * @param a2 The third extra argument.
12003 */
12004#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12005
12006/**
12007 * Defers the rest of the instruction emulation to a C implementation routine
12008 * and returns, taking four arguments in addition to the standard ones.
12009 *
12010 * @param a_pfnCImpl The pointer to the C routine.
12011 * @param a0 The first extra argument.
12012 * @param a1 The second extra argument.
12013 * @param a2 The third extra argument.
12014 * @param a3 The fourth extra argument.
12015 */
12016#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12017
12018/**
12019 * Defers the rest of the instruction emulation to a C implementation routine
12020 * and returns, taking five arguments in addition to the standard ones.
12021 *
12022 * @param a_pfnCImpl The pointer to the C routine.
12023 * @param a0 The first extra argument.
12024 * @param a1 The second extra argument.
12025 * @param a2 The third extra argument.
12026 * @param a3 The fourth extra argument.
12027 * @param a4 The fifth extra argument.
12028 */
12029#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12030
12031/**
12032 * Defers the entire instruction emulation to a C implementation routine and
12033 * returns, only taking the standard parameters.
12034 *
12035 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12036 *
12037 * @param a_pfnCImpl The pointer to the C routine.
12038 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12039 */
12040#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12041
12042/**
12043 * Defers the entire instruction emulation to a C implementation routine and
12044 * returns, taking one argument in addition to the standard ones.
12045 *
12046 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12047 *
12048 * @param a_pfnCImpl The pointer to the C routine.
12049 * @param a0 The argument.
12050 */
12051#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12052
12053/**
12054 * Defers the entire instruction emulation to a C implementation routine and
12055 * returns, taking two arguments in addition to the standard ones.
12056 *
12057 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12058 *
12059 * @param a_pfnCImpl The pointer to the C routine.
12060 * @param a0 The first extra argument.
12061 * @param a1 The second extra argument.
12062 */
12063#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12064
12065/**
12066 * Defers the entire instruction emulation to a C implementation routine and
12067 * returns, taking three arguments in addition to the standard ones.
12068 *
12069 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12070 *
12071 * @param a_pfnCImpl The pointer to the C routine.
12072 * @param a0 The first extra argument.
12073 * @param a1 The second extra argument.
12074 * @param a2 The third extra argument.
12075 */
12076#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
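/*
 * Sketch of the typical "hand the whole instruction to a C worker" pattern
 * using the defer macros above.  The decoder function, mnemonic helper and
 * iemCImpl_hlt worker live elsewhere in IEM and are shown here only to
 * illustrate the calling convention.
 */
#if 0
FNIEMOP_DEF(iemOp_example_hlt)
{
    IEMOP_MNEMONIC(hlt, "hlt");
    IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();   /* helper defined further below */
    return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_hlt);
}
#endif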
12077
12078/**
12079 * Calls a FPU assembly implementation taking one visible argument.
12080 *
12081 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12082 * @param a0 The first extra argument.
12083 */
12084#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12085 do { \
12086 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0)); \
12087 } while (0)
12088
12089/**
12090 * Calls a FPU assembly implementation taking two visible arguments.
12091 *
12092 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12093 * @param a0 The first extra argument.
12094 * @param a1 The second extra argument.
12095 */
12096#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12097 do { \
12098 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12099 } while (0)
12100
12101/**
12102 * Calls a FPU assembly implementation taking three visible arguments.
12103 *
12104 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12105 * @param a0 The first extra argument.
12106 * @param a1 The second extra argument.
12107 * @param a2 The third extra argument.
12108 */
12109#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12110 do { \
12111 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12112 } while (0)
12113
12114#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12115 do { \
12116 (a_FpuData).FSW = (a_FSW); \
12117 (a_FpuData).r80Result = *(a_pr80Value); \
12118 } while (0)
12119
12120/** Pushes FPU result onto the stack. */
12121#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12122 iemFpuPushResult(pVCpu, &a_FpuData)
12123/** Pushes FPU result onto the stack and sets the FPUDP. */
12124#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12125 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12126
12127/** Replaces ST0 with the first result value and pushes the second result value onto the FPU stack. */
12128#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12129 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12130
12131/** Stores FPU result in a stack register. */
12132#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12133 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12134/** Stores FPU result in a stack register and pops the stack. */
12135#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12136 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12137/** Stores FPU result in a stack register and sets the FPUDP. */
12138#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12139 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12140/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12141 * stack. */
12142#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12143 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12144
12145/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12146#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12147 iemFpuUpdateOpcodeAndIp(pVCpu)
12148/** Free a stack register (for FFREE and FFREEP). */
12149#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12150 iemFpuStackFree(pVCpu, a_iStReg)
12151/** Increment the FPU stack pointer. */
12152#define IEM_MC_FPU_STACK_INC_TOP() \
12153 iemFpuStackIncTop(pVCpu)
12154/** Decrement the FPU stack pointer. */
12155#define IEM_MC_FPU_STACK_DEC_TOP() \
12156 iemFpuStackDecTop(pVCpu)
12157
12158/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12159#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12160 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12161/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12162#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12163 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12164/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12165#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12166 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12167/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12168#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12169 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12170/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12171 * stack. */
12172#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12173 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12174/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12175#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12176 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12177
12178/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12179#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12180 iemFpuStackUnderflow(pVCpu, a_iStDst)
12181/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12182 * stack. */
12183#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12184 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12185/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12186 * FPUDS. */
12187#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12188 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12189/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12190 * FPUDS. Pops stack. */
12191#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12192 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12193/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12194 * stack twice. */
12195#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12196 iemFpuStackUnderflowThenPopPop(pVCpu)
12197/** Raises a FPU stack underflow exception for an instruction pushing a result
12198 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12199#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12200 iemFpuStackPushUnderflow(pVCpu)
12201/** Raises a FPU stack underflow exception for an instruction pushing a result
12202 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12203#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12204 iemFpuStackPushUnderflowTwo(pVCpu)
12205
12206/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12207 * FPUIP, FPUCS and FOP. */
12208#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12209 iemFpuStackPushOverflow(pVCpu)
12210/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12211 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12212#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12213 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12214/** Prepares for using the FPU state.
12215 * Ensures that we can use the host FPU in the current context (RC+R0).
12216 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12217#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12218/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12219#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12220/** Actualizes the guest FPU state so it can be accessed and modified. */
12221#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12222
12223/** Prepares for using the SSE state.
12224 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12225 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12226#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12227/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12228#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12229/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12230#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12231
12232/** Prepares for using the AVX state.
12233 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12234 * Ensures the guest AVX state in the CPUMCTX is up to date.
12235 * @note This will include the AVX512 state too when support for it is added
12236 * due to the zero-extending feature of VEX instructions. */
12237#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12238/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12239#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12240/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12241#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12242
12243/**
12244 * Calls a MMX assembly implementation taking two visible arguments.
12245 *
12246 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12247 * @param a0 The first extra argument.
12248 * @param a1 The second extra argument.
12249 */
12250#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12251 do { \
12252 IEM_MC_PREPARE_FPU_USAGE(); \
12253 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12254 } while (0)
12255
12256/**
12257 * Calls a MMX assembly implementation taking three visible arguments.
12258 *
12259 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12260 * @param a0 The first extra argument.
12261 * @param a1 The second extra argument.
12262 * @param a2 The third extra argument.
12263 */
12264#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12265 do { \
12266 IEM_MC_PREPARE_FPU_USAGE(); \
12267 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12268 } while (0)
12269
12270
12271/**
12272 * Calls a SSE assembly implementation taking two visible arguments.
12273 *
12274 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12275 * @param a0 The first extra argument.
12276 * @param a1 The second extra argument.
12277 */
12278#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12279 do { \
12280 IEM_MC_PREPARE_SSE_USAGE(); \
12281 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1)); \
12282 } while (0)
12283
12284/**
12285 * Calls a SSE assembly implementation taking three visible arguments.
12286 *
12287 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12288 * @param a0 The first extra argument.
12289 * @param a1 The second extra argument.
12290 * @param a2 The third extra argument.
12291 */
12292#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12293 do { \
12294 IEM_MC_PREPARE_SSE_USAGE(); \
12295 a_pfnAImpl(&pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87, (a0), (a1), (a2)); \
12296 } while (0)
12297
12298
12299/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12300 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12301#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12302 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0)
12303
12304/**
12305 * Calls a AVX assembly implementation taking two visible arguments.
12306 *
12307 * There is one implicit zero'th argument, a pointer to the extended state.
12308 *
12309 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12310 * @param a1 The first extra argument.
12311 * @param a2 The second extra argument.
12312 */
12313#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12314 do { \
12315 IEM_MC_PREPARE_AVX_USAGE(); \
12316 a_pfnAImpl(pXState, (a1), (a2)); \
12317 } while (0)
12318
12319/**
12320 * Calls a AVX assembly implementation taking three visible arguments.
12321 *
12322 * There is one implicit zero'th argument, a pointer to the extended state.
12323 *
12324 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12325 * @param a1 The first extra argument.
12326 * @param a2 The second extra argument.
12327 * @param a3 The third extra argument.
12328 */
12329#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12330 do { \
12331 IEM_MC_PREPARE_AVX_USAGE(); \
12332 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12333 } while (0)
12334
12335/** @note Not for IOPL or IF testing. */
12336#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12337/** @note Not for IOPL or IF testing. */
12338#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12339/** @note Not for IOPL or IF testing. */
12340#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12341/** @note Not for IOPL or IF testing. */
12342#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12343/** @note Not for IOPL or IF testing. */
12344#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12345 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12346 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12347/** @note Not for IOPL or IF testing. */
12348#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12349 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12350 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12351/** @note Not for IOPL or IF testing. */
12352#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12353 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12354 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12355 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12356/** @note Not for IOPL or IF testing. */
12357#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12358 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12359 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12360 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12361#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12362#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12363#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12364/** @note Not for IOPL or IF testing. */
12365#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12366 if ( pVCpu->cpum.GstCtx.cx != 0 \
12367 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12368/** @note Not for IOPL or IF testing. */
12369#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12370 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12371 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12372/** @note Not for IOPL or IF testing. */
12373#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12374 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12375 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12376/** @note Not for IOPL or IF testing. */
12377#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12378 if ( pVCpu->cpum.GstCtx.cx != 0 \
12379 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12380/** @note Not for IOPL or IF testing. */
12381#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12382 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12383 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12384/** @note Not for IOPL or IF testing. */
12385#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12386 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12387 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12388#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12389#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12390
12391#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12392 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12393#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12394 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12395#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12396 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12397#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12398 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12399#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12400 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12401#define IEM_MC_IF_FCW_IM() \
12402 if (pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87.FCW & X86_FCW_IM) {
12403
12404#define IEM_MC_ELSE() } else {
12405#define IEM_MC_ENDIF() } do {} while (0)
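/*
 * The IEM_MC_IF_* macros open a brace which IEM_MC_ELSE / IEM_MC_ENDIF close,
 * so conditional microcode reads like structured code.  Below is a
 * hypothetical FADD-style body combining them with the FPU result and
 * underflow macros defined earlier; the type and worker names (IEMFPURESULT,
 * iemAImpl_fadd_r80_by_r80, ...) come from elsewhere in IEM and are
 * assumptions in this sketch.
 */
#if 0
IEM_MC_BEGIN(3, 1);
IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
IEM_MC_ARG_LOCAL_REF(PIEMFPURESULT, pFpuRes, FpuRes, 0);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value1, 1);
IEM_MC_ARG(PCRTFLOAT80U, pr80Value2, 2);
IEM_MC_MAYBE_RAISE_FPU_XCPT();
IEM_MC_PREPARE_FPU_USAGE();
IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
    IEM_MC_CALL_FPU_AIMPL_3(iemAImpl_fadd_r80_by_r80, pFpuRes, pr80Value1, pr80Value2);
    IEM_MC_STORE_FPU_RESULT(FpuRes, 0);
IEM_MC_ELSE()
    IEM_MC_FPU_STACK_UNDERFLOW(0);
IEM_MC_ENDIF();
IEM_MC_ADVANCE_RIP();
IEM_MC_END();
#endif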
12406
12407/** @} */
12408
12409
12410/** @name Opcode Debug Helpers.
12411 * @{
12412 */
12413#ifdef VBOX_WITH_STATISTICS
12414# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.CTX_SUFF(pStats)->a_Stats += 1; } while (0)
12415#else
12416# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12417#endif
12418
12419#ifdef DEBUG
12420# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12421 do { \
12422 IEMOP_INC_STATS(a_Stats); \
12423 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12424 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12425 } while (0)
12426
12427# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12428 do { \
12429 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12430 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12431 (void)RT_CONCAT(OP_,a_Upper); \
12432 (void)(a_fDisHints); \
12433 (void)(a_fIemHints); \
12434 } while (0)
12435
12436# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12437 do { \
12438 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12439 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12440 (void)RT_CONCAT(OP_,a_Upper); \
12441 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12442 (void)(a_fDisHints); \
12443 (void)(a_fIemHints); \
12444 } while (0)
12445
12446# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12447 do { \
12448 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12449 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12450 (void)RT_CONCAT(OP_,a_Upper); \
12451 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12452 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12453 (void)(a_fDisHints); \
12454 (void)(a_fIemHints); \
12455 } while (0)
12456
12457# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12458 do { \
12459 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12460 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12461 (void)RT_CONCAT(OP_,a_Upper); \
12462 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12463 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12464 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12465 (void)(a_fDisHints); \
12466 (void)(a_fIemHints); \
12467 } while (0)
12468
12469# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12470 do { \
12471 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12472 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12473 (void)RT_CONCAT(OP_,a_Upper); \
12474 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12475 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12476 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12477 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12478 (void)(a_fDisHints); \
12479 (void)(a_fIemHints); \
12480 } while (0)
12481
12482#else
12483# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12484
12485# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12486 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12487# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12488 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12489# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12490 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12491# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12492 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12493# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12494 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12495
12496#endif
12497
12498#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12499 IEMOP_MNEMONIC0EX(a_Lower, \
12500 #a_Lower, \
12501 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12502#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12503 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12504 #a_Lower " " #a_Op1, \
12505 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12506#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12507 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12508 #a_Lower " " #a_Op1 "," #a_Op2, \
12509 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12510#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12511 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12512 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12513 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12514#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12515 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12516 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12517 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
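/*
 * What the IEMOP_MNEMONIC2 wrapper above boils down to for a typical opcode:
 * the statistics member and mnemonic string are derived from the lower-case
 * name and the operand forms by token pasting.  The concrete example
 * (add Gv,Ev, form RM, DISOPTYPE_HARMLESS) is only an illustration.
 */
#if 0
/* Written in a decoder as: */
IEMOP_MNEMONIC2(RM, ADD, add, Gv, Ev, DISOPTYPE_HARMLESS, 0);

/* ...which, ignoring the debug-only form/hint checks, amounts to: */
IEMOP_MNEMONIC(add_Gv_Ev, "add Gv,Ev");
#endif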
12518
12519/** @} */
12520
12521
12522/** @name Opcode Helpers.
12523 * @{
12524 */
12525
12526#ifdef IN_RING3
12527# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12528 do { \
12529 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12530 else \
12531 { \
12532 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12533 return IEMOP_RAISE_INVALID_OPCODE(); \
12534 } \
12535 } while (0)
12536#else
12537# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12538 do { \
12539 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12540 else return IEMOP_RAISE_INVALID_OPCODE(); \
12541 } while (0)
12542#endif
12543
12544/** The instruction requires a 186 or later. */
12545#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12546# define IEMOP_HLP_MIN_186() do { } while (0)
12547#else
12548# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12549#endif
12550
12551/** The instruction requires a 286 or later. */
12552#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12553# define IEMOP_HLP_MIN_286() do { } while (0)
12554#else
12555# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12556#endif
12557
12558/** The instruction requires a 386 or later. */
12559#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12560# define IEMOP_HLP_MIN_386() do { } while (0)
12561#else
12562# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12563#endif
12564
12565/** The instruction requires a 386 or later if the given expression is true. */
12566#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12567# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12568#else
12569# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12570#endif
12571
12572/** The instruction requires a 486 or later. */
12573#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12574# define IEMOP_HLP_MIN_486() do { } while (0)
12575#else
12576# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12577#endif
12578
12579/** The instruction requires a Pentium (586) or later. */
12580#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12581# define IEMOP_HLP_MIN_586() do { } while (0)
12582#else
12583# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12584#endif
12585
12586/** The instruction requires a PentiumPro (686) or later. */
12587#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12588# define IEMOP_HLP_MIN_686() do { } while (0)
12589#else
12590# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12591#endif
12592
12593
12594/** The instruction raises an \#UD in real and V8086 mode. */
12595#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12596 do \
12597 { \
12598 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12599 else return IEMOP_RAISE_INVALID_OPCODE(); \
12600 } while (0)
12601
12602#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12603/** This instruction raises an \#UD in real and V8086 mode, or in long mode when
12604 * not using a 64-bit code segment (applicable to all VMX instructions
12605 * except VMCALL).
12606 */
12607#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12608 do \
12609 { \
12610 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12611 && ( !IEM_IS_LONG_MODE(pVCpu) \
12612 || IEM_IS_64BIT_CODE(pVCpu))) \
12613 { /* likely */ } \
12614 else \
12615 { \
12616 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12617 { \
12618 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12619 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12620 return IEMOP_RAISE_INVALID_OPCODE(); \
12621 } \
12622 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12623 { \
12624 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12625 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12626 return IEMOP_RAISE_INVALID_OPCODE(); \
12627 } \
12628 } \
12629 } while (0)
12630
12631/** The instruction can only be executed in VMX operation (VMX root mode and
12632 * non-root mode).
12633 *
12634 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12635 */
12636# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12637 do \
12638 { \
12639 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12640 else \
12641 { \
12642 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12643 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12644 return IEMOP_RAISE_INVALID_OPCODE(); \
12645 } \
12646 } while (0)
12647#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12648
12649/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12650 * 64-bit mode. */
12651#define IEMOP_HLP_NO_64BIT() \
12652 do \
12653 { \
12654 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12655 return IEMOP_RAISE_INVALID_OPCODE(); \
12656 } while (0)
12657
12658/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12659 * 64-bit mode. */
12660#define IEMOP_HLP_ONLY_64BIT() \
12661 do \
12662 { \
12663 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12664 return IEMOP_RAISE_INVALID_OPCODE(); \
12665 } while (0)
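/*
 * Hypothetical decoder body showing how the CPU-minimum and 64-bit-mode
 * helpers above are combined; PUSHA is a 186+ instruction that is invalid in
 * long mode.  The placeholder return mirrors what the FNIEMOP_STUB macros do
 * and is not the real implementation.
 */
#if 0
FNIEMOP_DEF(iemOp_example_pusha)
{
    IEMOP_MNEMONIC(pusha, "pusha");
    IEMOP_HLP_MIN_186();    /* #UD on 8086/8088 class targets */
    IEMOP_HLP_NO_64BIT();   /* opcode 60h is invalid in 64-bit mode */
    /* ...the real body would dispatch on the effective operand size here... */
    return VERR_IEM_INSTR_NOT_IMPLEMENTED;
}
#endif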
12666
12667/** The instruction defaults to 64-bit operand size in 64-bit mode. */
12668#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12669 do \
12670 { \
12671 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12672 iemRecalEffOpSize64Default(pVCpu); \
12673 } while (0)
12674
12675/** The instruction has 64-bit operand size in 64-bit mode. */
12676#define IEMOP_HLP_64BIT_OP_SIZE() \
12677 do \
12678 { \
12679 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12680 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12681 } while (0)
12682
12683/** Only a REX prefix immediately preceding the first opcode byte takes
12684 * effect. This macro helps ensure this as well as log bad guest code. */
12685#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12686 do \
12687 { \
12688 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12689 { \
12690 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12691 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12692 pVCpu->iem.s.uRexB = 0; \
12693 pVCpu->iem.s.uRexIndex = 0; \
12694 pVCpu->iem.s.uRexReg = 0; \
12695 iemRecalEffOpSize(pVCpu); \
12696 } \
12697 } while (0)
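/*
 * Example of the rule enforced above (byte sequences for illustration only):
 * in 48 66 89 C0 the REX.W prefix is followed by another prefix (66h), so it
 * is dropped and the instruction decodes as the 16-bit "mov ax, ax", whereas
 * in 66 48 89 C0 the REX.W immediately precedes the opcode and wins, giving
 * the 64-bit "mov rax, rax".
 */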
12698
12699/**
12700 * Done decoding.
12701 */
12702#define IEMOP_HLP_DONE_DECODING() \
12703 do \
12704 { \
12705 /*nothing for now, maybe later... */ \
12706 } while (0)
12707
12708/**
12709 * Done decoding, raise \#UD exception if lock prefix present.
12710 */
12711#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12712 do \
12713 { \
12714 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12715 { /* likely */ } \
12716 else \
12717 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12718 } while (0)
12719
12720
12721/**
12722 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12723 * repnz or size prefixes are present, or if in real or v8086 mode.
12724 */
12725#define IEMOP_HLP_DONE_VEX_DECODING() \
12726 do \
12727 { \
12728 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12729 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12730 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12731 { /* likely */ } \
12732 else \
12733 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12734 } while (0)
12735
12736/**
12737 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12738 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12739 */
12740#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12741 do \
12742 { \
12743 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12744 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12745 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12746 && pVCpu->iem.s.uVexLength == 0)) \
12747 { /* likely */ } \
12748 else \
12749 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12750 } while (0)
12751
12752
12753/**
12754 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12755 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12756 * register 0, or if in real or v8086 mode.
12757 */
12758#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12759 do \
12760 { \
12761 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12762 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12763 && !pVCpu->iem.s.uVex3rdReg \
12764 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12765 { /* likely */ } \
12766 else \
12767 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12768 } while (0)
12769
12770/**
12771 * Done decoding VEX, no V, L=0.
12772 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12773 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12774 */
12775#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12776 do \
12777 { \
12778 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12779 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12780 && pVCpu->iem.s.uVexLength == 0 \
12781 && pVCpu->iem.s.uVex3rdReg == 0 \
12782 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12783 { /* likely */ } \
12784 else \
12785 return IEMOP_RAISE_INVALID_OPCODE(); \
12786 } while (0)
12787
12788#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12789 do \
12790 { \
12791 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12792 { /* likely */ } \
12793 else \
12794 { \
12795 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12796 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12797 } \
12798 } while (0)
12799#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12800 do \
12801 { \
12802 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12803 { /* likely */ } \
12804 else \
12805 { \
12806 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12807 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12808 } \
12809 } while (0)
12810
12811/**
12812 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12813 * are present.
12814 */
12815#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12816 do \
12817 { \
12818 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12819 { /* likely */ } \
12820 else \
12821 return IEMOP_RAISE_INVALID_OPCODE(); \
12822 } while (0)
12823
12824/**
12825 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12826 * prefixes are present.
12827 */
12828#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12829 do \
12830 { \
12831 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12832 { /* likely */ } \
12833 else \
12834 return IEMOP_RAISE_INVALID_OPCODE(); \
12835 } while (0)
12836
12837
12838/**
12839 * Calculates the effective address of a ModR/M memory operand.
12840 *
12841 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12842 *
12843 * @return Strict VBox status code.
12844 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12845 * @param bRm The ModRM byte.
12846 * @param cbImm The size of any immediate following the
12847 * effective address opcode bytes. Important for
12848 * RIP relative addressing.
12849 * @param pGCPtrEff Where to return the effective address.
12850 */
12851IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12852{
12853 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12854# define SET_SS_DEF() \
12855 do \
12856 { \
12857 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12858 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12859 } while (0)
12860
12861 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12862 {
12863/** @todo Check the effective address size crap! */
12864 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12865 {
12866 uint16_t u16EffAddr;
12867
12868 /* Handle the disp16 form with no registers first. */
12869 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12870 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12871 else
12872 {
12873 /* Get the displacement. */
12874 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12875 {
12876 case 0: u16EffAddr = 0; break;
12877 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12878 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12879 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12880 }
12881
12882 /* Add the base and index registers to the disp. */
12883 switch (bRm & X86_MODRM_RM_MASK)
12884 {
12885 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12886 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12887 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12888 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12889 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12890 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12891 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12892 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12893 }
12894 }
12895
12896 *pGCPtrEff = u16EffAddr;
12897 }
12898 else
12899 {
12900 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12901 uint32_t u32EffAddr;
12902
12903 /* Handle the disp32 form with no registers first. */
12904 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12905 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12906 else
12907 {
12908 /* Get the register (or SIB) value. */
12909 switch ((bRm & X86_MODRM_RM_MASK))
12910 {
12911 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12912 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12913 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12914 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12915 case 4: /* SIB */
12916 {
12917 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12918
12919 /* Get the index and scale it. */
12920 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12921 {
12922 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12923 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12924 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12925 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12926 case 4: u32EffAddr = 0; /*none */ break;
12927 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12928 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12929 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12930 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12931 }
12932 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12933
12934 /* add base */
12935 switch (bSib & X86_SIB_BASE_MASK)
12936 {
12937 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12938 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12939 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12940 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12941 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12942 case 5:
12943 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12944 {
12945 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12946 SET_SS_DEF();
12947 }
12948 else
12949 {
12950 uint32_t u32Disp;
12951 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12952 u32EffAddr += u32Disp;
12953 }
12954 break;
12955 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12956 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12957 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12958 }
12959 break;
12960 }
12961 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12962 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12963 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12964 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12965 }
12966
12967 /* Get and add the displacement. */
12968 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12969 {
12970 case 0:
12971 break;
12972 case 1:
12973 {
12974 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12975 u32EffAddr += i8Disp;
12976 break;
12977 }
12978 case 2:
12979 {
12980 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12981 u32EffAddr += u32Disp;
12982 break;
12983 }
12984 default:
12985 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12986 }
12987
12988 }
12989 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12990 *pGCPtrEff = u32EffAddr;
12991 else
12992 {
12993 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12994 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12995 }
12996 }
12997 }
12998 else
12999 {
13000 uint64_t u64EffAddr;
13001
13002 /* Handle the rip+disp32 form with no registers first. */
13003 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13004 {
13005 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13006 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13007 }
13008 else
13009 {
13010 /* Get the register (or SIB) value. */
13011 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13012 {
13013 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13014 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13015 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13016 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13017 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13018 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13019 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13020 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13021 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13022 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13023 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13024 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13025 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13026 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13027 /* SIB */
13028 case 4:
13029 case 12:
13030 {
13031 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13032
13033 /* Get the index and scale it. */
13034 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13035 {
13036 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13037 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13038 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13039 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13040 case 4: u64EffAddr = 0; /*none */ break;
13041 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13042 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13043 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13044 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13045 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13046 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13047 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13048 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13049 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13050 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13051 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13052 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13053 }
13054 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13055
13056 /* add base */
13057 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13058 {
13059 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13060 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13061 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13062 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13063 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13064 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13065 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13066 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13067 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13068 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13069 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13070 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13071 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13072 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13073 /* complicated encodings */
13074 case 5:
13075 case 13:
13076 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13077 {
13078 if (!pVCpu->iem.s.uRexB)
13079 {
13080 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13081 SET_SS_DEF();
13082 }
13083 else
13084 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13085 }
13086 else
13087 {
13088 uint32_t u32Disp;
13089 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13090 u64EffAddr += (int32_t)u32Disp;
13091 }
13092 break;
13093 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13094 }
13095 break;
13096 }
13097 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13098 }
13099
13100 /* Get and add the displacement. */
13101 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13102 {
13103 case 0:
13104 break;
13105 case 1:
13106 {
13107 int8_t i8Disp;
13108 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13109 u64EffAddr += i8Disp;
13110 break;
13111 }
13112 case 2:
13113 {
13114 uint32_t u32Disp;
13115 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13116 u64EffAddr += (int32_t)u32Disp;
13117 break;
13118 }
13119 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13120 }
13121
13122 }
13123
13124 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13125 *pGCPtrEff = u64EffAddr;
13126 else
13127 {
13128 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13129 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13130 }
13131 }
13132
13133 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13134 return VINF_SUCCESS;
13135}
13136
13137
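/*
 * Example (for orientation): in 64-bit mode the bytes 8B 44 D8 10 encode
 * "mov eax, [rax + rbx*8 + 0x10]":
 *      ModR/M 0x44 -> mod=01 (disp8 follows), reg=000 (eax), rm=100 (SIB follows)
 *      SIB    0xD8 -> scale=11 (index*8),     index=011 (rbx), base=000 (rax)
 *      disp8  0x10
 * The helpers here walk exactly these fields: rm (or the SIB byte) selects the
 * base and index registers, mod selects the displacement size, and mod=00 with
 * rm=101 selects RIP-relative (disp32) addressing in 64-bit mode.
 */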
13138/**
13139 * Calculates the effective address of a ModR/M memory operand.
13140 *
13141 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13142 *
13143 * @return Strict VBox status code.
13144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13145 * @param bRm The ModRM byte.
13146 * @param cbImm The size of any immediate following the
13147 * effective address opcode bytes. Important for
13148 * RIP relative addressing.
13149 * @param pGCPtrEff Where to return the effective address.
13150 * @param   offRsp              Displacement to apply when xSP is used as the
13150                               base register (SIB base).
13151 */
13152IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13153{
13154    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13155# define SET_SS_DEF() \
13156 do \
13157 { \
13158 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13159 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13160 } while (0)
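    /* Note: this implements the architectural default-segment rule -- memory
       operands that use xBP or xSP as base default to SS rather than DS,
       unless an explicit segment prefix is present. */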
13161
13162 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13163 {
13164/** @todo Check the effective address size crap! */
13165 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13166 {
13167 uint16_t u16EffAddr;
13168
13169 /* Handle the disp16 form with no registers first. */
13170 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13171 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13172 else
13173 {
13174                /* Get the displacement. */
13175 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13176 {
13177 case 0: u16EffAddr = 0; break;
13178 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13179 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13180 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13181 }
13182
13183 /* Add the base and index registers to the disp. */
13184 switch (bRm & X86_MODRM_RM_MASK)
13185 {
13186 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13187 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13188 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13189 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13190 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13191 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13192 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13193 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13194 }
13195 }
13196
13197 *pGCPtrEff = u16EffAddr;
13198 }
13199 else
13200 {
13201 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13202 uint32_t u32EffAddr;
13203
13204 /* Handle the disp32 form with no registers first. */
13205 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13206 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13207 else
13208 {
13209 /* Get the register (or SIB) value. */
13210 switch ((bRm & X86_MODRM_RM_MASK))
13211 {
13212 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13213 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13214 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13215 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13216 case 4: /* SIB */
13217 {
13218 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13219
13220 /* Get the index and scale it. */
13221 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13222 {
13223 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13224 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13225 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13226 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13227 case 4: u32EffAddr = 0; /*none */ break;
13228 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13229 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13230 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13231 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13232 }
13233 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13234
13235 /* add base */
13236 switch (bSib & X86_SIB_BASE_MASK)
13237 {
13238 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13239 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13240 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13241 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13242 case 4:
13243 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13244 SET_SS_DEF();
13245 break;
13246 case 5:
13247 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13248 {
13249 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13250 SET_SS_DEF();
13251 }
13252 else
13253 {
13254 uint32_t u32Disp;
13255 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13256 u32EffAddr += u32Disp;
13257 }
13258 break;
13259 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13260 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13261 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13262 }
13263 break;
13264 }
13265 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13266 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13267 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13268 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13269 }
13270
13271 /* Get and add the displacement. */
13272 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13273 {
13274 case 0:
13275 break;
13276 case 1:
13277 {
13278 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13279 u32EffAddr += i8Disp;
13280 break;
13281 }
13282 case 2:
13283 {
13284 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13285 u32EffAddr += u32Disp;
13286 break;
13287 }
13288 default:
13289 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13290 }
13291
13292 }
13293 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13294 *pGCPtrEff = u32EffAddr;
13295 else
13296 {
13297 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13298 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13299 }
13300 }
13301 }
13302 else
13303 {
13304 uint64_t u64EffAddr;
13305
13306 /* Handle the rip+disp32 form with no registers first. */
13307 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13308 {
13309 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13310 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13311 }
13312 else
13313 {
13314 /* Get the register (or SIB) value. */
13315 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13316 {
13317 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13318 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13319 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13320 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13321 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13322 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13323 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13324 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13325 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13326 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13327 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13328 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13329 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13330 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13331 /* SIB */
13332 case 4:
13333 case 12:
13334 {
13335 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13336
13337 /* Get the index and scale it. */
13338 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13339 {
13340 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13341 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13342 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13343 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13344 case 4: u64EffAddr = 0; /*none */ break;
13345 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13346 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13347 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13348 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13349 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13350 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13351 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13352 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13353 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13354 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13355 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13356 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13357 }
13358 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13359
13360 /* add base */
13361 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13362 {
13363 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13364 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13365 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13366 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13367 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13368 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13369 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13370 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13371 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13372 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13373 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13374 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13375 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13376 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13377 /* complicated encodings */
13378 case 5:
13379 case 13:
13380 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13381 {
13382 if (!pVCpu->iem.s.uRexB)
13383 {
13384 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13385 SET_SS_DEF();
13386 }
13387 else
13388 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13389 }
13390 else
13391 {
13392 uint32_t u32Disp;
13393 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13394 u64EffAddr += (int32_t)u32Disp;
13395 }
13396 break;
13397 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13398 }
13399 break;
13400 }
13401 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13402 }
13403
13404 /* Get and add the displacement. */
13405 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13406 {
13407 case 0:
13408 break;
13409 case 1:
13410 {
13411 int8_t i8Disp;
13412 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13413 u64EffAddr += i8Disp;
13414 break;
13415 }
13416 case 2:
13417 {
13418 uint32_t u32Disp;
13419 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13420 u64EffAddr += (int32_t)u32Disp;
13421 break;
13422 }
13423 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13424 }
13425
13426 }
13427
13428 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13429 *pGCPtrEff = u64EffAddr;
13430 else
13431 {
13432 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13433 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13434 }
13435 }
13436
13437    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13438 return VINF_SUCCESS;
13439}
13440
13441
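/*
 * Note: the Jmp variant below (only built with IEM_WITH_SETJMP) returns the
 * effective address directly and reports internal decode errors by longjmp'ing
 * to the per-VCPU jump buffer (pVCpu->iem.s.CTX_SUFF(pJmpBuf)), which callers
 * such as iemExecOneInner further down establish with setjmp before dispatching
 * the instruction.
 */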
13442#ifdef IEM_WITH_SETJMP
13443/**
13444 * Calculates the effective address of a ModR/M memory operand.
13445 *
13446 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13447 *
13448 * May longjmp on internal error.
13449 *
13450 * @return The effective address.
13451 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13452 * @param bRm The ModRM byte.
13453 * @param cbImm The size of any immediate following the
13454 * effective address opcode bytes. Important for
13455 * RIP relative addressing.
13456 */
13457IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm)
13458{
13459 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13460# define SET_SS_DEF() \
13461 do \
13462 { \
13463 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13464 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13465 } while (0)
13466
13467 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13468 {
13469/** @todo Check the effective address size crap! */
13470 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13471 {
13472 uint16_t u16EffAddr;
13473
13474 /* Handle the disp16 form with no registers first. */
13475 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13476 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13477 else
13478 {
13479                /* Get the displacement. */
13480 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13481 {
13482 case 0: u16EffAddr = 0; break;
13483 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13484 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13485 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13486 }
13487
13488 /* Add the base and index registers to the disp. */
13489 switch (bRm & X86_MODRM_RM_MASK)
13490 {
13491 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13492 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13493 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13494 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13495 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13496 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13497 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13498 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13499 }
13500 }
13501
13502 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13503 return u16EffAddr;
13504 }
13505
13506 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13507 uint32_t u32EffAddr;
13508
13509 /* Handle the disp32 form with no registers first. */
13510 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13511 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13512 else
13513 {
13514 /* Get the register (or SIB) value. */
13515 switch ((bRm & X86_MODRM_RM_MASK))
13516 {
13517 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13518 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13519 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13520 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13521 case 4: /* SIB */
13522 {
13523 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13524
13525 /* Get the index and scale it. */
13526 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13527 {
13528 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13529 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13530 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13531 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13532 case 4: u32EffAddr = 0; /*none */ break;
13533 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13534 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13535 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13536 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13537 }
13538 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13539
13540 /* add base */
13541 switch (bSib & X86_SIB_BASE_MASK)
13542 {
13543 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13544 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13545 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13546 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13547 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13548 case 5:
13549 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13550 {
13551 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13552 SET_SS_DEF();
13553 }
13554 else
13555 {
13556 uint32_t u32Disp;
13557 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13558 u32EffAddr += u32Disp;
13559 }
13560 break;
13561 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13562 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13563 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13564 }
13565 break;
13566 }
13567 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13568 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13569 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13570 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13571 }
13572
13573 /* Get and add the displacement. */
13574 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13575 {
13576 case 0:
13577 break;
13578 case 1:
13579 {
13580 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13581 u32EffAddr += i8Disp;
13582 break;
13583 }
13584 case 2:
13585 {
13586 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13587 u32EffAddr += u32Disp;
13588 break;
13589 }
13590 default:
13591 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13592 }
13593 }
13594
13595 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13596 {
13597 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13598 return u32EffAddr;
13599 }
13600 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13601 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13602 return u32EffAddr & UINT16_MAX;
13603 }
13604
13605 uint64_t u64EffAddr;
13606
13607 /* Handle the rip+disp32 form with no registers first. */
13608 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13609 {
13610 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13611 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13612 }
13613 else
13614 {
13615 /* Get the register (or SIB) value. */
13616 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13617 {
13618 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13619 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13620 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13621 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13622 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13623 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13624 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13625 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13626 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13627 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13628 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13629 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13630 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13631 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13632 /* SIB */
13633 case 4:
13634 case 12:
13635 {
13636 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13637
13638 /* Get the index and scale it. */
13639 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13640 {
13641 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13642 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13643 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13644 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13645 case 4: u64EffAddr = 0; /*none */ break;
13646 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13647 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13648 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13649 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13650 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13651 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13652 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13653 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13654 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13655 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13656 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13657 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13658 }
13659 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13660
13661 /* add base */
13662 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13663 {
13664 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13665 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13666 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13667 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13668 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13669 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13670 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13671 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13672 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13673 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13674 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13675 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13676 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13677 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13678 /* complicated encodings */
13679 case 5:
13680 case 13:
13681 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13682 {
13683 if (!pVCpu->iem.s.uRexB)
13684 {
13685 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13686 SET_SS_DEF();
13687 }
13688 else
13689 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13690 }
13691 else
13692 {
13693 uint32_t u32Disp;
13694 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13695 u64EffAddr += (int32_t)u32Disp;
13696 }
13697 break;
13698 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13699 }
13700 break;
13701 }
13702 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13703 }
13704
13705 /* Get and add the displacement. */
13706 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13707 {
13708 case 0:
13709 break;
13710 case 1:
13711 {
13712 int8_t i8Disp;
13713 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13714 u64EffAddr += i8Disp;
13715 break;
13716 }
13717 case 2:
13718 {
13719 uint32_t u32Disp;
13720 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13721 u64EffAddr += (int32_t)u32Disp;
13722 break;
13723 }
13724 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13725 }
13726
13727 }
13728
13729 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13730 {
13731 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13732 return u64EffAddr;
13733 }
13734 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13735 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13736 return u64EffAddr & UINT32_MAX;
13737}
13738#endif /* IEM_WITH_SETJMP */
13739
13740/** @} */
13741
13742
13743
13744/*
13745 * Include the instructions
13746 */
13747#include "IEMAllInstructions.cpp.h"
13748
13749
13750
13751#ifdef LOG_ENABLED
13752/**
13753 * Logs the current instruction.
13754 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13755 * @param fSameCtx Set if we have the same context information as the VMM,
13756 * clear if we may have already executed an instruction in
13757 * our debug context. When clear, we assume IEMCPU holds
13758 * valid CPU mode info.
13759 *
13760 * The @a fSameCtx parameter is now misleading and obsolete.
13761 * @param pszFunction The IEM function doing the execution.
13762 */
13763IEM_STATIC void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction)
13764{
13765# ifdef IN_RING3
13766 if (LogIs2Enabled())
13767 {
13768 char szInstr[256];
13769 uint32_t cbInstr = 0;
13770 if (fSameCtx)
13771 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13772 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13773 szInstr, sizeof(szInstr), &cbInstr);
13774 else
13775 {
13776 uint32_t fFlags = 0;
13777 switch (pVCpu->iem.s.enmCpuMode)
13778 {
13779 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13780 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13781 case IEMMODE_16BIT:
13782 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13783 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13784 else
13785 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13786 break;
13787 }
13788 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13789 szInstr, sizeof(szInstr), &cbInstr);
13790 }
13791
13792 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.CTX_SUFF(pXState)->x87;
13793 Log2(("**** %s\n"
13794 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13795 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13796 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13797 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13798 " %s\n"
13799 , pszFunction,
13800 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13801 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13802 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13803 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13804 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13805 szInstr));
13806
13807 if (LogIs3Enabled())
13808 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13809 }
13810 else
13811# endif
13812 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13813 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13814 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13815}
13816#endif /* LOG_ENABLED */
13817
13818
13819#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
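/*
 * Note: as the per-branch comments in the function below spell out, the pending
 * force-flags are serviced in priority order APIC-write > MTF > VMX-preemption
 * timer > NMI-window > interrupt-window.
 */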
13820/**
13821 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
13822 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
13823 *
13824 * @returns Modified rcStrict.
13825 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13826 * @param rcStrict The instruction execution status.
13827 */
13828static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13829{
13830 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
13831 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
13832 {
13833 /* VMX preemption timer takes priority over NMI-window exits. */
13834 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
13835 {
13836 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
13837 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
13838 }
13839 /*
13840 * Check remaining intercepts.
13841 *
13842 * NMI-window and Interrupt-window VM-exits.
13843 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
13844 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
13845 *
13846 * See Intel spec. 26.7.6 "NMI-Window Exiting".
13847 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
13848 */
13849 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
13850 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13851 && !TRPMHasTrap(pVCpu))
13852 {
13853 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
13854 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
13855 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
13856 {
13857 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
13858 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
13859 }
13860 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
13861 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
13862 {
13863 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
13864 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
13865 }
13866 }
13867 }
13868 /* TPR-below threshold/APIC write has the highest priority. */
13869 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
13870 {
13871 rcStrict = iemVmxApicWriteEmulation(pVCpu);
13872 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13873 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
13874 }
13875 /* MTF takes priority over VMX-preemption timer. */
13876 else
13877 {
13878 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
13879 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13880 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
13881 }
13882 return rcStrict;
13883}
13884#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
13885
13886
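/*
 * Note on the merging rule below: a pending rcPassUp replaces an informational
 * rcStrict when it lies outside the VINF_EM_FIRST..VINF_EM_LAST range, or when
 * it is numerically lower (more important) within that range; otherwise the
 * original status is kept and only the statistics are updated.
 */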
13887/**
13888 * Makes status code adjustments (pass up from I/O and access handler)
13889 * as well as maintaining statistics.
13890 *
13891 * @returns Strict VBox status code to pass up.
13892 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13893 * @param rcStrict The status from executing an instruction.
13894 */
13895DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13896{
13897 if (rcStrict != VINF_SUCCESS)
13898 {
13899 if (RT_SUCCESS(rcStrict))
13900 {
13901 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13902 || rcStrict == VINF_IOM_R3_IOPORT_READ
13903 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13904 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13905 || rcStrict == VINF_IOM_R3_MMIO_READ
13906 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13907 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13908 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13909 || rcStrict == VINF_CPUM_R3_MSR_READ
13910 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13911 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13912 || rcStrict == VINF_EM_RAW_TO_R3
13913 || rcStrict == VINF_EM_TRIPLE_FAULT
13914 || rcStrict == VINF_GIM_R3_HYPERCALL
13915 /* raw-mode / virt handlers only: */
13916 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13917 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13918 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13919 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13920 || rcStrict == VINF_SELM_SYNC_GDT
13921 || rcStrict == VINF_CSAM_PENDING_ACTION
13922 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13923 /* nested hw.virt codes: */
13924 || rcStrict == VINF_VMX_VMEXIT
13925 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
13926 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13927 || rcStrict == VINF_SVM_VMEXIT
13928 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13929/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13930 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13931#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13932 if ( rcStrict == VINF_VMX_VMEXIT
13933 && rcPassUp == VINF_SUCCESS)
13934 rcStrict = VINF_SUCCESS;
13935 else
13936#endif
13937#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13938 if ( rcStrict == VINF_SVM_VMEXIT
13939 && rcPassUp == VINF_SUCCESS)
13940 rcStrict = VINF_SUCCESS;
13941 else
13942#endif
13943 if (rcPassUp == VINF_SUCCESS)
13944 pVCpu->iem.s.cRetInfStatuses++;
13945 else if ( rcPassUp < VINF_EM_FIRST
13946 || rcPassUp > VINF_EM_LAST
13947 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13948 {
13949 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13950 pVCpu->iem.s.cRetPassUpStatus++;
13951 rcStrict = rcPassUp;
13952 }
13953 else
13954 {
13955 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13956 pVCpu->iem.s.cRetInfStatuses++;
13957 }
13958 }
13959 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13960 pVCpu->iem.s.cRetAspectNotImplemented++;
13961 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13962 pVCpu->iem.s.cRetInstrNotImplemented++;
13963 else
13964 pVCpu->iem.s.cRetErrStatuses++;
13965 }
13966 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13967 {
13968 pVCpu->iem.s.cRetPassUpStatus++;
13969 rcStrict = pVCpu->iem.s.rcPassUp;
13970 }
13971
13972 return rcStrict;
13973}
13974
13975
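/*
 * Note: the fExecuteInhibit handling below covers the one-instruction interrupt
 * shadow after STI, POP SS and MOV SS.  E.g. the classic 16-bit stack switch
 * "mov ss, ax" / "mov sp, bp" must complete both instructions before any
 * interrupt is delivered, so IEM executes the shadowed instruction here too.
 */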
13976/**
13977 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13978 * IEMExecOneWithPrefetchedByPC.
13979 *
13980 * Similar code is found in IEMExecLots.
13981 *
13982 * @return Strict VBox status code.
13983 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13984 * @param   fExecuteInhibit     If set, execute the instruction following STI,
13985 * POP SS and MOV SS,GR.
13986 * @param pszFunction The calling function name.
13987 */
13988DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
13989{
13990 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13991 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13992 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13993 RT_NOREF_PV(pszFunction);
13994
13995#ifdef IEM_WITH_SETJMP
13996 VBOXSTRICTRC rcStrict;
13997 jmp_buf JmpBuf;
13998 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13999 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14000 if ((rcStrict = setjmp(JmpBuf)) == 0)
14001 {
14002 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14003 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14004 }
14005 else
14006 pVCpu->iem.s.cLongJumps++;
14007 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14008#else
14009 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14010 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14011#endif
14012 if (rcStrict == VINF_SUCCESS)
14013 pVCpu->iem.s.cInstructions++;
14014 if (pVCpu->iem.s.cActiveMappings > 0)
14015 {
14016 Assert(rcStrict != VINF_SUCCESS);
14017 iemMemRollback(pVCpu);
14018 }
14019 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14020 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14021 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14022
14023//#ifdef DEBUG
14024// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14025//#endif
14026
14027#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14028 /*
14029 * Perform any VMX nested-guest instruction boundary actions.
14030 *
14031 * If any of these causes a VM-exit, we must skip executing the next
14032 * instruction (would run into stale page tables). A VM-exit makes sure
14033 * there is no interrupt-inhibition, so that should ensure we don't go
14034     * on to try executing the next instruction.  Clearing fExecuteInhibit is
14035 * problematic because of the setjmp/longjmp clobbering above.
14036 */
14037 if ( rcStrict == VINF_SUCCESS
14038 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
14039 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
14040 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
14041#endif
14042
14043     /* Execute the next instruction as well if a sti, pop ss or
14044 mov ss, Gr has just completed successfully. */
14045 if ( fExecuteInhibit
14046 && rcStrict == VINF_SUCCESS
14047 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14048 && EMIsInhibitInterruptsActive(pVCpu))
14049 {
14050 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
14051 if (rcStrict == VINF_SUCCESS)
14052 {
14053#ifdef LOG_ENABLED
14054 iemLogCurInstr(pVCpu, false, pszFunction);
14055#endif
14056#ifdef IEM_WITH_SETJMP
14057 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14058 if ((rcStrict = setjmp(JmpBuf)) == 0)
14059 {
14060 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14061 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14062 }
14063 else
14064 pVCpu->iem.s.cLongJumps++;
14065 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14066#else
14067 IEM_OPCODE_GET_NEXT_U8(&b);
14068 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14069#endif
14070 if (rcStrict == VINF_SUCCESS)
14071 pVCpu->iem.s.cInstructions++;
14072 if (pVCpu->iem.s.cActiveMappings > 0)
14073 {
14074 Assert(rcStrict != VINF_SUCCESS);
14075 iemMemRollback(pVCpu);
14076 }
14077 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14078 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14079 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14080 }
14081 else if (pVCpu->iem.s.cActiveMappings > 0)
14082 iemMemRollback(pVCpu);
14083 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
14084 }
14085
14086 /*
14087 * Return value fiddling, statistics and sanity assertions.
14088 */
14089 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14090
14091 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14092 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14093 return rcStrict;
14094}
14095
14096
14097/**
14098 * Execute one instruction.
14099 *
14100 * @return Strict VBox status code.
14101 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14102 */
14103VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
14104{
14105#ifdef LOG_ENABLED
14106 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14107#endif
14108
14109 /*
14110 * Do the decoding and emulation.
14111 */
14112 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14113 if (rcStrict == VINF_SUCCESS)
14114 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14115 else if (pVCpu->iem.s.cActiveMappings > 0)
14116 iemMemRollback(pVCpu);
14117
14118 if (rcStrict != VINF_SUCCESS)
14119 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14120 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14121 return rcStrict;
14122}
14123
14124
14125VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14126{
14127 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14128
14129 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14130 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14131 if (rcStrict == VINF_SUCCESS)
14132 {
14133 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14134 if (pcbWritten)
14135 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14136 }
14137 else if (pVCpu->iem.s.cActiveMappings > 0)
14138 iemMemRollback(pVCpu);
14139
14140 return rcStrict;
14141}
14142
14143
14144VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14145 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14146{
14147 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14148
14149 VBOXSTRICTRC rcStrict;
14150 if ( cbOpcodeBytes
14151 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14152 {
14153 iemInitDecoder(pVCpu, false, false);
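        /* Note: since the caller's prefetched bytes cover the current RIP, the
           decoder is pointed straight at the caller's buffer (code TLB build)
           or the bytes are copied into abOpcode (non-TLB build), so no guest
           memory fetch is needed here. */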
14154#ifdef IEM_WITH_CODE_TLB
14155 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14156 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14157 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14158 pVCpu->iem.s.offCurInstrStart = 0;
14159 pVCpu->iem.s.offInstrNextByte = 0;
14160#else
14161 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14162 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14163#endif
14164 rcStrict = VINF_SUCCESS;
14165 }
14166 else
14167 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14168 if (rcStrict == VINF_SUCCESS)
14169 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14170 else if (pVCpu->iem.s.cActiveMappings > 0)
14171 iemMemRollback(pVCpu);
14172
14173 return rcStrict;
14174}
14175
14176
14177VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14178{
14179 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14180
14181 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14182 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14183 if (rcStrict == VINF_SUCCESS)
14184 {
14185 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14186 if (pcbWritten)
14187 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14188 }
14189 else if (pVCpu->iem.s.cActiveMappings > 0)
14190 iemMemRollback(pVCpu);
14191
14192 return rcStrict;
14193}
14194
14195
14196VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14197 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14198{
14199 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14200
14201 VBOXSTRICTRC rcStrict;
14202 if ( cbOpcodeBytes
14203 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14204 {
14205 iemInitDecoder(pVCpu, true, false);
14206#ifdef IEM_WITH_CODE_TLB
14207 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14208 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14209 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14210 pVCpu->iem.s.offCurInstrStart = 0;
14211 pVCpu->iem.s.offInstrNextByte = 0;
14212#else
14213 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14214 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14215#endif
14216 rcStrict = VINF_SUCCESS;
14217 }
14218 else
14219 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14220 if (rcStrict == VINF_SUCCESS)
14221 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14222 else if (pVCpu->iem.s.cActiveMappings > 0)
14223 iemMemRollback(pVCpu);
14224
14225 return rcStrict;
14226}
14227
14228
14229/**
14230 * For debugging DISGetParamSize; may come in handy.
14231 *
14232 * @returns Strict VBox status code.
14233 * @param pVCpu The cross context virtual CPU structure of the
14234 * calling EMT.
14235 * @param pCtxCore The context core structure.
14236 * @param OpcodeBytesPC The PC of the opcode bytes.
14237 * @param   pvOpcodeBytes   Prefetched opcode bytes.
14238 * @param cbOpcodeBytes Number of prefetched bytes.
14239 * @param pcbWritten Where to return the number of bytes written.
14240 * Optional.
14241 */
14242VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14243 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14244 uint32_t *pcbWritten)
14245{
14246 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14247
14248 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14249 VBOXSTRICTRC rcStrict;
14250 if ( cbOpcodeBytes
14251 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14252 {
14253 iemInitDecoder(pVCpu, true, false);
14254#ifdef IEM_WITH_CODE_TLB
14255 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14256 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14257 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14258 pVCpu->iem.s.offCurInstrStart = 0;
14259 pVCpu->iem.s.offInstrNextByte = 0;
14260#else
14261 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14262 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14263#endif
14264 rcStrict = VINF_SUCCESS;
14265 }
14266 else
14267 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14268 if (rcStrict == VINF_SUCCESS)
14269 {
14270 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14271 if (pcbWritten)
14272 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14273 }
14274 else if (pVCpu->iem.s.cActiveMappings > 0)
14275 iemMemRollback(pVCpu);
14276
14277 return rcStrict;
14278}
14279
14280
14281/**
14282 * For handling split cacheline lock operations when the host has split-lock
14283 * detection enabled.
14284 *
14285 * This will cause the interpreter to disregard the lock prefix and implicit
14286 * locking (xchg).
14287 *
14288 * @returns Strict VBox status code.
14289 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14290 */
14291VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
14292{
14293 /*
14294 * Do the decoding and emulation.
14295 */
14296 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
14297 if (rcStrict == VINF_SUCCESS)
14298 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
14299 else if (pVCpu->iem.s.cActiveMappings > 0)
14300 iemMemRollback(pVCpu);
14301
14302 if (rcStrict != VINF_SUCCESS)
14303 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14304 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14305 return rcStrict;
14306}
14307
14308
14309VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14310{
14311 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14312 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
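    /* Note: cPollRate must be a power-of-two minus one so it can be used as a
       mask below; TMTimerPollBool is then consulted roughly once every
       cPollRate + 1 instructions (e.g. 0x1ff means about every 512th). */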
14313
14314 /*
14315 * See if there is an interrupt pending in TRPM, inject it if we can.
14316 */
14317 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14318#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14319 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14320 if (fIntrEnabled)
14321 {
14322 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14323 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14324 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14325 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
14326 else
14327 {
14328 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14329 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14330 }
14331 }
14332#else
14333 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14334#endif
14335
14336 /** @todo What if we are injecting an exception and not an interrupt? Is that
14337 * possible here? For now we assert it is indeed only an interrupt. */
14338 if ( fIntrEnabled
14339 && TRPMHasTrap(pVCpu)
14340 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14341 {
14342 uint8_t u8TrapNo;
14343 TRPMEVENT enmType;
14344 uint32_t uErrCode;
14345 RTGCPTR uCr2;
14346 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
14347 AssertRC(rc2);
14348 Assert(enmType == TRPM_HARDWARE_INT);
14349 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14350 TRPMResetTrap(pVCpu);
14351#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14352 /* Injecting an event may cause a VM-exit. */
14353 if ( rcStrict != VINF_SUCCESS
14354 && rcStrict != VINF_IEM_RAISED_XCPT)
14355 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14356#else
14357 NOREF(rcStrict);
14358#endif
14359 }
14360
14361 /*
14362 * Initial decoder init w/ prefetch, then setup setjmp.
14363 */
14364 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14365 if (rcStrict == VINF_SUCCESS)
14366 {
14367#ifdef IEM_WITH_SETJMP
14368 jmp_buf JmpBuf;
14369 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14370 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14371 pVCpu->iem.s.cActiveMappings = 0;
14372 if ((rcStrict = setjmp(JmpBuf)) == 0)
14373#endif
14374 {
14375 /*
14376             * The run loop.  We limit ourselves to the caller-specified cMaxInstructions limit.
14377 */
14378 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14379 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
14380 for (;;)
14381 {
14382 /*
14383 * Log the state.
14384 */
14385#ifdef LOG_ENABLED
14386 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14387#endif
14388
14389 /*
14390 * Do the decoding and emulation.
14391 */
14392 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14393 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14394 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14395 {
14396 Assert(pVCpu->iem.s.cActiveMappings == 0);
14397 pVCpu->iem.s.cInstructions++;
14398 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14399 {
14400 uint64_t fCpu = pVCpu->fLocalForcedActions
14401 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14402 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14403 | VMCPU_FF_TLB_FLUSH
14404 | VMCPU_FF_INHIBIT_INTERRUPTS
14405 | VMCPU_FF_BLOCK_NMIS
14406 | VMCPU_FF_UNHALT ));
14407
14408 if (RT_LIKELY( ( !fCpu
14409 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14410 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14411 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14412 {
14413 if (cMaxInstructionsGccStupidity-- > 0)
14414 {
14415                                /* Poll timers every now and then according to the caller's specs. */
14416 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14417 || !TMTimerPollBool(pVM, pVCpu))
14418 {
14419 Assert(pVCpu->iem.s.cActiveMappings == 0);
14420 iemReInitDecoder(pVCpu);
14421 continue;
14422 }
14423 }
14424 }
14425 }
14426 Assert(pVCpu->iem.s.cActiveMappings == 0);
14427 }
14428 else if (pVCpu->iem.s.cActiveMappings > 0)
14429 iemMemRollback(pVCpu);
14430 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14431 break;
14432 }
14433 }
14434#ifdef IEM_WITH_SETJMP
14435 else
14436 {
14437 if (pVCpu->iem.s.cActiveMappings > 0)
14438 iemMemRollback(pVCpu);
14439# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14440 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14441# endif
14442 pVCpu->iem.s.cLongJumps++;
14443 }
14444 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14445#endif
14446
14447 /*
14448 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14449 */
14450 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14451 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14452 }
14453 else
14454 {
14455 if (pVCpu->iem.s.cActiveMappings > 0)
14456 iemMemRollback(pVCpu);
14457
14458#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14459 /*
14460 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14461 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14462 */
14463 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14464#endif
14465 }
14466
14467 /*
14468 * Maybe re-enter raw-mode and log.
14469 */
14470 if (rcStrict != VINF_SUCCESS)
14471 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14472 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14473 if (pcInstructions)
14474 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14475 return rcStrict;
14476}
14477
14478
14479/**
14480 * Interface used by EMExecuteExec, does exit statistics and limits.
14481 *
14482 * @returns Strict VBox status code.
14483 * @param pVCpu The cross context virtual CPU structure.
14484 * @param fWillExit To be defined.
14485 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14486 * @param cMaxInstructions Maximum number of instructions to execute.
14487 * @param cMaxInstructionsWithoutExits
14488 * The max number of instructions without exits.
14489 * @param pStats Where to return statistics.
14490 */
14491VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14492 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14493{
14494 NOREF(fWillExit); /** @todo define flexible exit crits */
14495
14496 /*
14497 * Initialize return stats.
14498 */
14499 pStats->cInstructions = 0;
14500 pStats->cExits = 0;
14501 pStats->cMaxExitDistance = 0;
14502 pStats->cReserved = 0;
14503
14504 /*
14505 * Initial decoder init w/ prefetch, then setup setjmp.
14506 */
14507 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14508 if (rcStrict == VINF_SUCCESS)
14509 {
14510#ifdef IEM_WITH_SETJMP
14511 jmp_buf JmpBuf;
14512 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14513 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14514 pVCpu->iem.s.cActiveMappings = 0;
14515 if ((rcStrict = setjmp(JmpBuf)) == 0)
14516#endif
14517 {
14518#ifdef IN_RING0
14519 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14520#endif
14521 uint32_t cInstructionSinceLastExit = 0;
14522
14523 /*
14524             * The run loop.  We limit ourselves to the caller-specified cMaxInstructions limit.
14525 */
14526 PVM pVM = pVCpu->CTX_SUFF(pVM);
14527 for (;;)
14528 {
14529 /*
14530 * Log the state.
14531 */
14532#ifdef LOG_ENABLED
14533 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14534#endif
14535
14536 /*
14537 * Do the decoding and emulation.
14538 */
14539 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14540
14541 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14542 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14543
14544 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14545 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14546 {
14547 pStats->cExits += 1;
14548 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14549 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14550 cInstructionSinceLastExit = 0;
14551 }
14552
14553 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14554 {
14555 Assert(pVCpu->iem.s.cActiveMappings == 0);
14556 pVCpu->iem.s.cInstructions++;
14557 pStats->cInstructions++;
14558 cInstructionSinceLastExit++;
14559 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14560 {
14561 uint64_t fCpu = pVCpu->fLocalForcedActions
14562 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14563 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14564 | VMCPU_FF_TLB_FLUSH
14565 | VMCPU_FF_INHIBIT_INTERRUPTS
14566 | VMCPU_FF_BLOCK_NMIS
14567 | VMCPU_FF_UNHALT ));
14568
14569 if (RT_LIKELY( ( ( !fCpu
14570 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14571 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14572 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14573 || pStats->cInstructions < cMinInstructions))
14574 {
14575 if (pStats->cInstructions < cMaxInstructions)
14576 {
14577 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14578 {
14579#ifdef IN_RING0
14580 if ( !fCheckPreemptionPending
14581 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14582#endif
14583 {
14584 Assert(pVCpu->iem.s.cActiveMappings == 0);
14585 iemReInitDecoder(pVCpu);
14586 continue;
14587 }
14588#ifdef IN_RING0
14589 rcStrict = VINF_EM_RAW_INTERRUPT;
14590 break;
14591#endif
14592 }
14593 }
14594 }
14595 Assert(!(fCpu & VMCPU_FF_IEM));
14596 }
14597 Assert(pVCpu->iem.s.cActiveMappings == 0);
14598 }
14599 else if (pVCpu->iem.s.cActiveMappings > 0)
14600 iemMemRollback(pVCpu);
14601 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14602 break;
14603 }
14604 }
14605#ifdef IEM_WITH_SETJMP
14606 else
14607 {
14608 if (pVCpu->iem.s.cActiveMappings > 0)
14609 iemMemRollback(pVCpu);
14610 pVCpu->iem.s.cLongJumps++;
14611 }
14612 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14613#endif
14614
14615 /*
14616 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14617 */
14618 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14619 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14620 }
14621 else
14622 {
14623 if (pVCpu->iem.s.cActiveMappings > 0)
14624 iemMemRollback(pVCpu);
14625
14626#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14627 /*
14628 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14629 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14630 */
14631 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14632#endif
14633 }
14634
14635 /*
14636     * Log the result if it is not VINF_SUCCESS.
14637 */
14638 if (rcStrict != VINF_SUCCESS)
14639 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14640 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14641 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14642 return rcStrict;
14643}
14644
14645
14646/**
14647 * Injects a trap, fault, abort, software interrupt or external interrupt.
14648 *
14649 * The parameter list matches TRPMQueryTrapAll pretty closely.
14650 *
14651 * @returns Strict VBox status code.
14652 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14653 * @param u8TrapNo The trap number.
14654 * @param enmType What type is it (trap/fault/abort), software
14655 * interrupt or hardware interrupt.
14656 * @param uErrCode The error code if applicable.
14657 * @param uCr2 The CR2 value if applicable.
14658 * @param cbInstr The instruction length (only relevant for
14659 * software interrupts).
14660 */
14661VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14662 uint8_t cbInstr)
14663{
14664 iemInitDecoder(pVCpu, false, false);
14665#ifdef DBGFTRACE_ENABLED
14666 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14667 u8TrapNo, enmType, uErrCode, uCr2);
14668#endif
14669
14670 uint32_t fFlags;
14671 switch (enmType)
14672 {
14673 case TRPM_HARDWARE_INT:
14674 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14675 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14676 uErrCode = uCr2 = 0;
14677 break;
14678
14679 case TRPM_SOFTWARE_INT:
14680 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14681 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14682 uErrCode = uCr2 = 0;
14683 break;
14684
14685 case TRPM_TRAP:
14686 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14687 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14688 if (u8TrapNo == X86_XCPT_PF)
14689 fFlags |= IEM_XCPT_FLAGS_CR2;
14690 switch (u8TrapNo)
14691 {
14692 case X86_XCPT_DF:
14693 case X86_XCPT_TS:
14694 case X86_XCPT_NP:
14695 case X86_XCPT_SS:
14696 case X86_XCPT_PF:
14697 case X86_XCPT_AC:
14698 case X86_XCPT_GP:
14699 fFlags |= IEM_XCPT_FLAGS_ERR;
14700 break;
14701 }
14702 break;
14703
14704 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14705 }
14706
14707 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14708
14709 if (pVCpu->iem.s.cActiveMappings > 0)
14710 iemMemRollback(pVCpu);
14711
14712 return rcStrict;
14713}
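
/*
 * Illustrative sketch, not built: one way a caller could use IEMInjectTrap()
 * to inject a page fault.  The wrapper function and its parameters are
 * hypothetical; only the IEMInjectTrap() signature and the TRPM_TRAP /
 * X86_XCPT_PF values are taken from the code above.
 */
#if 0
static VBOXSTRICTRC exampleInjectPageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrFault, uint16_t uErrCode)
{
    /* A #PF is a CPU exception (TRPM_TRAP) carrying both an error code and a
       CR2 value; cbInstr only matters for software interrupts, so pass 0. */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uErrCode, GCPtrFault, 0 /*cbInstr*/);
    LogFlow(("exampleInjectPageFault: rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    return rcStrict;
}
#endif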
14714
14715
14716/**
14717 * Injects the active TRPM event.
14718 *
14719 * @returns Strict VBox status code.
14720 * @param pVCpu The cross context virtual CPU structure.
14721 */
14722VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
14723{
14724#ifndef IEM_IMPLEMENTS_TASKSWITCH
14725 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14726#else
14727 uint8_t u8TrapNo;
14728 TRPMEVENT enmType;
14729 uint32_t uErrCode;
14730 RTGCUINTPTR uCr2;
14731 uint8_t cbInstr;
14732 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
14733 if (RT_FAILURE(rc))
14734 return rc;
14735
14736 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
14737 * ICEBP \#DB injection as a special case. */
14738 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14739#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14740 if (rcStrict == VINF_SVM_VMEXIT)
14741 rcStrict = VINF_SUCCESS;
14742#endif
14743#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14744 if (rcStrict == VINF_VMX_VMEXIT)
14745 rcStrict = VINF_SUCCESS;
14746#endif
14747 /** @todo Are there any other codes that imply the event was successfully
14748 * delivered to the guest? See @bugref{6607}. */
14749 if ( rcStrict == VINF_SUCCESS
14750 || rcStrict == VINF_IEM_RAISED_XCPT)
14751 TRPMResetTrap(pVCpu);
14752
14753 return rcStrict;
14754#endif
14755}
14756
14757
14758VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14759{
14760 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14761 return VERR_NOT_IMPLEMENTED;
14762}
14763
14764
14765VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14766{
14767 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14768 return VERR_NOT_IMPLEMENTED;
14769}
14770
14771
14772#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14773/**
14774 * Executes a IRET instruction with default operand size.
14775 *
14776 * This is for PATM.
14777 *
14778 * @returns VBox status code.
14779 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14780 * @param pCtxCore The register frame.
14781 */
14782VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
14783{
14784 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14785
14786 iemCtxCoreToCtx(pCtx, pCtxCore);
14787 iemInitDecoder(pVCpu);
14788 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14789 if (rcStrict == VINF_SUCCESS)
14790 iemCtxToCtxCore(pCtxCore, pCtx);
14791 else
14792 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14793 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14794 return rcStrict;
14795}
14796#endif
14797
14798
14799/**
14800 * Macro used by the IEMExec* method to check the given instruction length.
14801 *
14802 * Will return on failure!
14803 *
14804 * @param a_cbInstr The given instruction length.
14805 * @param a_cbMin The minimum length.
14806 */
14807#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14808 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14809 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14810
14811
14812/**
14813 * Calls iemUninitExec and iemExecStatusCodeFiddling.
14814 *
14815 * The raw-mode only iemRCRawMaybeReenter step the name alludes to is no longer done.
14816 *
14817 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14818 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14819 * @param rcStrict The status code to fiddle.
14820 */
14821DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
14822{
14823 iemUninitExec(pVCpu);
14824 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14825}
14826
14827
14828/**
14829 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14830 *
14831 * This API ASSUMES that the caller has already verified that the guest code is
14832 * allowed to access the I/O port. (The I/O port is in the DX register in the
14833 * guest state.)
14834 *
14835 * @returns Strict VBox status code.
14836 * @param pVCpu The cross context virtual CPU structure.
14837 * @param cbValue The size of the I/O port access (1, 2, or 4).
14838 * @param enmAddrMode The addressing mode.
14839 * @param fRepPrefix Indicates whether a repeat prefix is used
14840 * (doesn't matter which for this instruction).
14841 * @param cbInstr The instruction length in bytes.
14842 * @param iEffSeg The effective segment register.
14843 * @param fIoChecked Whether the access to the I/O port has been
14844 * checked or not. It's typically checked in the
14845 * HM scenario.
14846 */
14847VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14848 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14849{
14850 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14851 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14852
14853 /*
14854 * State init.
14855 */
14856 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14857
14858 /*
14859 * Switch orgy for getting to the right handler.
14860 */
14861 VBOXSTRICTRC rcStrict;
14862 if (fRepPrefix)
14863 {
14864 switch (enmAddrMode)
14865 {
14866 case IEMMODE_16BIT:
14867 switch (cbValue)
14868 {
14869 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14870 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14871 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14872 default:
14873 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14874 }
14875 break;
14876
14877 case IEMMODE_32BIT:
14878 switch (cbValue)
14879 {
14880 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14881 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14882 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14883 default:
14884 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14885 }
14886 break;
14887
14888 case IEMMODE_64BIT:
14889 switch (cbValue)
14890 {
14891 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14892 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14893 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14894 default:
14895 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14896 }
14897 break;
14898
14899 default:
14900 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14901 }
14902 }
14903 else
14904 {
14905 switch (enmAddrMode)
14906 {
14907 case IEMMODE_16BIT:
14908 switch (cbValue)
14909 {
14910 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14911 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14912 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14913 default:
14914 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14915 }
14916 break;
14917
14918 case IEMMODE_32BIT:
14919 switch (cbValue)
14920 {
14921 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14922 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14923 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14924 default:
14925 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14926 }
14927 break;
14928
14929 case IEMMODE_64BIT:
14930 switch (cbValue)
14931 {
14932 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14933 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14934 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14935 default:
14936 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14937 }
14938 break;
14939
14940 default:
14941 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14942 }
14943 }
14944
14945 if (pVCpu->iem.s.cActiveMappings)
14946 iemMemRollback(pVCpu);
14947
14948 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14949}
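
/*
 * Illustrative sketch, not built: calling the string I/O write interface above
 * for a REP OUTSB with 32-bit addressing, roughly what an exit handler might
 * do once it has decoded the instruction and validated the I/O permissions.
 * The wrapper and the way cbInstr is obtained are hypothetical.
 */
#if 0
static VBOXSTRICTRC exampleRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    /* One byte accesses, 32-bit addressing, REP prefix, DS as the source
       segment, and the port access already checked by the caller. */
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS, true /*fIoChecked*/);
}
#endif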
14950
14951
14952/**
14953 * Interface for HM and EM for executing string I/O IN (read) instructions.
14954 *
14955 * This API ASSUMES that the caller has already verified that the guest code is
14956 * allowed to access the I/O port. (The I/O port is in the DX register in the
14957 * guest state.)
14958 *
14959 * @returns Strict VBox status code.
14960 * @param pVCpu The cross context virtual CPU structure.
14961 * @param cbValue The size of the I/O port access (1, 2, or 4).
14962 * @param enmAddrMode The addressing mode.
14963 * @param fRepPrefix Indicates whether a repeat prefix is used
14964 * (doesn't matter which for this instruction).
14965 * @param cbInstr The instruction length in bytes.
14966 * @param fIoChecked Whether the access to the I/O port has been
14967 * checked or not. It's typically checked in the
14968 * HM scenario.
14969 */
14970VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14971 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14972{
14973 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14974
14975 /*
14976 * State init.
14977 */
14978 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14979
14980 /*
14981 * Switch orgy for getting to the right handler.
14982 */
14983 VBOXSTRICTRC rcStrict;
14984 if (fRepPrefix)
14985 {
14986 switch (enmAddrMode)
14987 {
14988 case IEMMODE_16BIT:
14989 switch (cbValue)
14990 {
14991 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14992 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14993 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14994 default:
14995 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14996 }
14997 break;
14998
14999 case IEMMODE_32BIT:
15000 switch (cbValue)
15001 {
15002 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15003 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15004 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15005 default:
15006 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15007 }
15008 break;
15009
15010 case IEMMODE_64BIT:
15011 switch (cbValue)
15012 {
15013 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15014 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15015 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15016 default:
15017 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15018 }
15019 break;
15020
15021 default:
15022 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15023 }
15024 }
15025 else
15026 {
15027 switch (enmAddrMode)
15028 {
15029 case IEMMODE_16BIT:
15030 switch (cbValue)
15031 {
15032 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15033 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15034 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15035 default:
15036 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15037 }
15038 break;
15039
15040 case IEMMODE_32BIT:
15041 switch (cbValue)
15042 {
15043 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15044 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15045 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15046 default:
15047 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15048 }
15049 break;
15050
15051 case IEMMODE_64BIT:
15052 switch (cbValue)
15053 {
15054 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15055 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15056 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15057 default:
15058 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15059 }
15060 break;
15061
15062 default:
15063 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15064 }
15065 }
15066
15067 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15068 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15069}
15070
15071
15072/**
15073 * Interface for rawmode to execute an OUT (write) instruction.
15074 *
15075 * @returns Strict VBox status code.
15076 * @param pVCpu The cross context virtual CPU structure.
15077 * @param cbInstr The instruction length in bytes.
15078 * @param u16Port The port to write to.
15079 * @param fImm Whether the port is specified using an immediate operand or
15080 * using the implicit DX register.
15081 * @param cbReg The register size.
15082 *
15083 * @remarks In ring-0 not all of the state needs to be synced in.
15084 */
15085VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15086{
15087 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15088 Assert(cbReg <= 4 && cbReg != 3);
15089
15090 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15091 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15092 Assert(!pVCpu->iem.s.cActiveMappings);
15093 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15094}
15095
15096
15097/**
15098 * Interface for rawmode to execute an IN (read) instruction.
15099 *
15100 * @returns Strict VBox status code.
15101 * @param pVCpu The cross context virtual CPU structure.
15102 * @param cbInstr The instruction length in bytes.
15103 * @param u16Port The port to read from.
15104 * @param fImm Whether the port is specified using an immediate operand or
15105 * using the implicit DX.
15106 * @param cbReg The register size.
15107 */
15108VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15109{
15110 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15111 Assert(cbReg <= 4 && cbReg != 3);
15112
15113 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15114 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15115 Assert(!pVCpu->iem.s.cActiveMappings);
15116 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15117}
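
/*
 * Illustrative sketch, not built: emulating "out dx, al" followed by
 * "in al, dx" through the decoded OUT/IN interfaces above.  Both use the
 * implicit DX form (fImm=false), access one byte, and have one byte
 * encodings; the wrapper itself is hypothetical.
 */
#if 0
static VBOXSTRICTRC exampleDecodedPortIo(PVMCPUCC pVCpu, uint16_t u16Port)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedOut(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
    if (rcStrict == VINF_SUCCESS)
        rcStrict = IEMExecDecodedIn(pVCpu, 1 /*cbInstr*/, u16Port, false /*fImm*/, 1 /*cbReg*/);
    return rcStrict;
}
#endif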
15118
15119
15120/**
15121 * Interface for HM and EM to write to a CRx register.
15122 *
15123 * @returns Strict VBox status code.
15124 * @param pVCpu The cross context virtual CPU structure.
15125 * @param cbInstr The instruction length in bytes.
15126 * @param iCrReg The control register number (destination).
15127 * @param iGReg The general purpose register number (source).
15128 *
15129 * @remarks In ring-0 not all of the state needs to be synced in.
15130 */
15131VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15132{
15133 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15134 Assert(iCrReg < 16);
15135 Assert(iGReg < 16);
15136
15137 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15138 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15139 Assert(!pVCpu->iem.s.cActiveMappings);
15140 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15141}
15142
15143
15144/**
15145 * Interface for HM and EM to read from a CRx register.
15146 *
15147 * @returns Strict VBox status code.
15148 * @param pVCpu The cross context virtual CPU structure.
15149 * @param cbInstr The instruction length in bytes.
15150 * @param iGReg The general purpose register number (destination).
15151 * @param iCrReg The control register number (source).
15152 *
15153 * @remarks In ring-0 not all of the state needs to be synced in.
15154 */
15155VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15156{
15157 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15158 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15159 | CPUMCTX_EXTRN_APIC_TPR);
15160 Assert(iCrReg < 16);
15161 Assert(iGReg < 16);
15162
15163 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15164 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15165 Assert(!pVCpu->iem.s.cActiveMappings);
15166 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15167}
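
/*
 * Illustrative sketch, not built: emulating "mov eax, cr0" through the CRx
 * read interface above.  The 3 byte length and the register indices follow
 * the standard 0F 20 /r encoding; the wrapper is hypothetical.
 */
#if 0
static VBOXSTRICTRC exampleReadCr0IntoEax(PVMCPUCC pVCpu)
{
    return IEMExecDecodedMovCRxRead(pVCpu, 3 /*cbInstr*/, 0 /*iGReg=xAX*/, 0 /*iCrReg=CR0*/);
}
#endif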
15168
15169
15170/**
15171 * Interface for HM and EM to clear the CR0[TS] bit.
15172 *
15173 * @returns Strict VBox status code.
15174 * @param pVCpu The cross context virtual CPU structure.
15175 * @param cbInstr The instruction length in bytes.
15176 *
15177 * @remarks In ring-0 not all of the state needs to be synced in.
15178 */
15179VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
15180{
15181 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15182
15183 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15184 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15185 Assert(!pVCpu->iem.s.cActiveMappings);
15186 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15187}
15188
15189
15190/**
15191 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15192 *
15193 * @returns Strict VBox status code.
15194 * @param pVCpu The cross context virtual CPU structure.
15195 * @param cbInstr The instruction length in bytes.
15196 * @param uValue The value to load into CR0.
15197 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15198 * memory operand. Otherwise pass NIL_RTGCPTR.
15199 *
15200 * @remarks In ring-0 not all of the state needs to be synced in.
15201 */
15202VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15203{
15204 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15205
15206 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15207 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15208 Assert(!pVCpu->iem.s.cActiveMappings);
15209 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15210}
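
/*
 * Illustrative sketch, not built: emulating the register form "lmsw ax" via
 * the interface above.  NIL_RTGCPTR signals that there is no memory operand,
 * as documented for GCPtrEffDst; the 3 byte length matches the 0F 01 /6
 * register encoding.  The wrapper is hypothetical.
 */
#if 0
static VBOXSTRICTRC exampleEmulateLmswFromAx(PVMCPUCC pVCpu, uint16_t uMsw)
{
    return IEMExecDecodedLmsw(pVCpu, 3 /*cbInstr*/, uMsw, NIL_RTGCPTR);
}
#endif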
15211
15212
15213/**
15214 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15215 *
15216 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15217 *
15218 * @returns Strict VBox status code.
15219 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15220 * @param cbInstr The instruction length in bytes.
15221 * @remarks In ring-0 not all of the state needs to be synced in.
15222 * @thread EMT(pVCpu)
15223 */
15224VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
15225{
15226 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15227
15228 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15229 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15230 Assert(!pVCpu->iem.s.cActiveMappings);
15231 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15232}
15233
15234
15235/**
15236 * Interface for HM and EM to emulate the WBINVD instruction.
15237 *
15238 * @returns Strict VBox status code.
15239 * @param pVCpu The cross context virtual CPU structure.
15240 * @param cbInstr The instruction length in bytes.
15241 *
15242 * @remarks In ring-0 not all of the state needs to be synced in.
15243 */
15244VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15245{
15246 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15247
15248 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15249 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15250 Assert(!pVCpu->iem.s.cActiveMappings);
15251 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15252}
15253
15254
15255/**
15256 * Interface for HM and EM to emulate the INVD instruction.
15257 *
15258 * @returns Strict VBox status code.
15259 * @param pVCpu The cross context virtual CPU structure.
15260 * @param cbInstr The instruction length in bytes.
15261 *
15262 * @remarks In ring-0 not all of the state needs to be synced in.
15263 */
15264VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15265{
15266 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15267
15268 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15269 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15270 Assert(!pVCpu->iem.s.cActiveMappings);
15271 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15272}
15273
15274
15275/**
15276 * Interface for HM and EM to emulate the INVLPG instruction.
15277 *
15278 * @returns Strict VBox status code.
15279 * @retval VINF_PGM_SYNC_CR3
15280 *
15281 * @param pVCpu The cross context virtual CPU structure.
15282 * @param cbInstr The instruction length in bytes.
15283 * @param GCPtrPage The effective address of the page to invalidate.
15284 *
15285 * @remarks In ring-0 not all of the state needs to be synced in.
15286 */
15287VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15288{
15289 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15290
15291 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15292 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15293 Assert(!pVCpu->iem.s.cActiveMappings);
15294 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15295}
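
/*
 * Illustrative sketch, not built: the INVLPG interface above can return
 * VINF_PGM_SYNC_CR3 in addition to plain success; this hypothetical wrapper
 * merely logs that case and passes the status up to the caller.
 */
#if 0
static VBOXSTRICTRC exampleEmulateInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
    if (rcStrict == VINF_PGM_SYNC_CR3)
        LogFlow(("exampleEmulateInvlpg: %RGv - shadow page tables need syncing\n", GCPtrPage));
    return rcStrict;
}
#endif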
15296
15297
15298/**
15299 * Interface for HM and EM to emulate the INVPCID instruction.
15300 *
15301 * @returns Strict VBox status code.
15302 * @retval VINF_PGM_SYNC_CR3
15303 *
15304 * @param pVCpu The cross context virtual CPU structure.
15305 * @param cbInstr The instruction length in bytes.
15306 * @param iEffSeg The effective segment register.
15307 * @param GCPtrDesc The effective address of the INVPCID descriptor.
15308 * @param uType The invalidation type.
15309 *
15310 * @remarks In ring-0 not all of the state needs to be synced in.
15311 */
15312VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
15313 uint64_t uType)
15314{
15315 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
15316
15317 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15318 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
15319 Assert(!pVCpu->iem.s.cActiveMappings);
15320 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15321}
15322
15323
15324/**
15325 * Interface for HM and EM to emulate the CPUID instruction.
15326 *
15327 * @returns Strict VBox status code.
15328 *
15329 * @param pVCpu The cross context virtual CPU structure.
15330 * @param cbInstr The instruction length in bytes.
15331 *
15332 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
15333 */
15334VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
15335{
15336 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15337 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15338
15339 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15340 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15341 Assert(!pVCpu->iem.s.cActiveMappings);
15342 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15343}
15344
15345
15346/**
15347 * Interface for HM and EM to emulate the RDPMC instruction.
15348 *
15349 * @returns Strict VBox status code.
15350 *
15351 * @param pVCpu The cross context virtual CPU structure.
15352 * @param cbInstr The instruction length in bytes.
15353 *
15354 * @remarks Not all of the state needs to be synced in.
15355 */
15356VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
15357{
15358 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15359 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15360
15361 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15362 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15363 Assert(!pVCpu->iem.s.cActiveMappings);
15364 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15365}
15366
15367
15368/**
15369 * Interface for HM and EM to emulate the RDTSC instruction.
15370 *
15371 * @returns Strict VBox status code.
15372 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15373 *
15374 * @param pVCpu The cross context virtual CPU structure.
15375 * @param cbInstr The instruction length in bytes.
15376 *
15377 * @remarks Not all of the state needs to be synced in.
15378 */
15379VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
15380{
15381 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15382 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15383
15384 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15385 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15386 Assert(!pVCpu->iem.s.cActiveMappings);
15387 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15388}
15389
15390
15391/**
15392 * Interface for HM and EM to emulate the RDTSCP instruction.
15393 *
15394 * @returns Strict VBox status code.
15395 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15396 *
15397 * @param pVCpu The cross context virtual CPU structure.
15398 * @param cbInstr The instruction length in bytes.
15399 *
15400 * @remarks Not all of the state needs to be synced in. It is recommended
15401 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15402 */
15403VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
15404{
15405 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15406 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15407
15408 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15409 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15410 Assert(!pVCpu->iem.s.cActiveMappings);
15411 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15412}
15413
15414
15415/**
15416 * Interface for HM and EM to emulate the RDMSR instruction.
15417 *
15418 * @returns Strict VBox status code.
15419 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15420 *
15421 * @param pVCpu The cross context virtual CPU structure.
15422 * @param cbInstr The instruction length in bytes.
15423 *
15424 * @remarks Not all of the state needs to be synced in. Requires RCX and
15425 * (currently) all MSRs.
15426 */
15427VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15428{
15429 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15430 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15431
15432 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15433 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15434 Assert(!pVCpu->iem.s.cActiveMappings);
15435 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15436}
15437
15438
15439/**
15440 * Interface for HM and EM to emulate the WRMSR instruction.
15441 *
15442 * @returns Strict VBox status code.
15443 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15444 *
15445 * @param pVCpu The cross context virtual CPU structure.
15446 * @param cbInstr The instruction length in bytes.
15447 *
15448 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15449 * and (currently) all MSRs.
15450 */
15451VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15452{
15453 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15454 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15455 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15456
15457 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15458 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15459 Assert(!pVCpu->iem.s.cActiveMappings);
15460 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15461}
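
/*
 * Illustrative sketch, not built: emulating a RDMSR intercept with the
 * interface above.  Per the remarks, the caller is assumed to have made RCX
 * and the MSR related state available in the guest context beforehand; how
 * that is done is caller specific and left out.  Treating VINF_IEM_RAISED_XCPT
 * as success is this example's policy, since IEM has already raised the
 * exception in the guest context in that case.
 */
#if 0
static VBOXSTRICTRC exampleEmulateRdmsr(PVMCPUCC pVCpu)
{
    /* RDMSR and WRMSR are both two byte opcodes (0F 32 and 0F 30). */
    VBOXSTRICTRC rcStrict = IEMExecDecodedRdmsr(pVCpu, 2 /*cbInstr*/);
    if (rcStrict == VINF_IEM_RAISED_XCPT)
        rcStrict = VINF_SUCCESS;
    return rcStrict;
}
#endif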
15462
15463
15464/**
15465 * Interface for HM and EM to emulate the MONITOR instruction.
15466 *
15467 * @returns Strict VBox status code.
15468 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15469 *
15470 * @param pVCpu The cross context virtual CPU structure.
15471 * @param cbInstr The instruction length in bytes.
15472 *
15473 * @remarks Not all of the state needs to be synced in.
15474 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15475 * are used.
15476 */
15477VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
15478{
15479 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15480 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15481
15482 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15483 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15484 Assert(!pVCpu->iem.s.cActiveMappings);
15485 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15486}
15487
15488
15489/**
15490 * Interface for HM and EM to emulate the MWAIT instruction.
15491 *
15492 * @returns Strict VBox status code.
15493 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15494 *
15495 * @param pVCpu The cross context virtual CPU structure.
15496 * @param cbInstr The instruction length in bytes.
15497 *
15498 * @remarks Not all of the state needs to be synced in.
15499 */
15500VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
15501{
15502 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15503 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
15504
15505 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15506 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15507 Assert(!pVCpu->iem.s.cActiveMappings);
15508 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15509}
15510
15511
15512/**
15513 * Interface for HM and EM to emulate the HLT instruction.
15514 *
15515 * @returns Strict VBox status code.
15516 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15517 *
15518 * @param pVCpu The cross context virtual CPU structure.
15519 * @param cbInstr The instruction length in bytes.
15520 *
15521 * @remarks Not all of the state needs to be synced in.
15522 */
15523VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
15524{
15525 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15526
15527 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15528 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15529 Assert(!pVCpu->iem.s.cActiveMappings);
15530 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15531}
15532
15533
15534/**
15535 * Checks if IEM is in the process of delivering an event (interrupt or
15536 * exception).
15537 *
15538 * @returns true if we're in the process of raising an interrupt or exception,
15539 * false otherwise.
15540 * @param pVCpu The cross context virtual CPU structure.
15541 * @param puVector Where to store the vector associated with the
15542 * currently delivered event, optional.
15543 * @param pfFlags Where to store the event delivery flags (see
15544 * IEM_XCPT_FLAGS_XXX), optional.
15545 * @param puErr Where to store the error code associated with the
15546 * event, optional.
15547 * @param puCr2 Where to store the CR2 associated with the event,
15548 * optional.
15549 * @remarks The caller should check the flags to determine if the error code and
15550 * CR2 are valid for the event.
15551 */
15552VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15553{
15554 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15555 if (fRaisingXcpt)
15556 {
15557 if (puVector)
15558 *puVector = pVCpu->iem.s.uCurXcpt;
15559 if (pfFlags)
15560 *pfFlags = pVCpu->iem.s.fCurXcpt;
15561 if (puErr)
15562 *puErr = pVCpu->iem.s.uCurXcptErr;
15563 if (puCr2)
15564 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15565 }
15566 return fRaisingXcpt;
15567}
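
/*
 * Illustrative sketch, not built: querying the event IEM is currently
 * delivering.  The IEM_XCPT_FLAGS_ERR / IEM_XCPT_FLAGS_CR2 checks decide
 * which of the optional outputs are meaningful, per the remarks above; the
 * wrapper is hypothetical.
 */
#if 0
static void exampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector;
    uint32_t fFlags;
    uint32_t uErr;
    uint64_t uCr2;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
        LogFlow(("exampleLogCurrentXcpt: vec=%#x flags=%#x err=%#x (valid=%RTbool) cr2=%#RX64 (valid=%RTbool)\n",
                 uVector, fFlags, uErr, RT_BOOL(fFlags & IEM_XCPT_FLAGS_ERR), uCr2, RT_BOOL(fFlags & IEM_XCPT_FLAGS_CR2)));
}
#endif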
15568
15569#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15570
15571/**
15572 * Interface for HM and EM to emulate the CLGI instruction.
15573 *
15574 * @returns Strict VBox status code.
15575 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15576 * @param cbInstr The instruction length in bytes.
15577 * @thread EMT(pVCpu)
15578 */
15579VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15580{
15581 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15582
15583 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15584 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15585 Assert(!pVCpu->iem.s.cActiveMappings);
15586 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15587}
15588
15589
15590/**
15591 * Interface for HM and EM to emulate the STGI instruction.
15592 *
15593 * @returns Strict VBox status code.
15594 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15595 * @param cbInstr The instruction length in bytes.
15596 * @thread EMT(pVCpu)
15597 */
15598VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15599{
15600 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15601
15602 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15603 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15604 Assert(!pVCpu->iem.s.cActiveMappings);
15605 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15606}
15607
15608
15609/**
15610 * Interface for HM and EM to emulate the VMLOAD instruction.
15611 *
15612 * @returns Strict VBox status code.
15613 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15614 * @param cbInstr The instruction length in bytes.
15615 * @thread EMT(pVCpu)
15616 */
15617VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
15618{
15619 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15620
15621 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15622 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15623 Assert(!pVCpu->iem.s.cActiveMappings);
15624 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15625}
15626
15627
15628/**
15629 * Interface for HM and EM to emulate the VMSAVE instruction.
15630 *
15631 * @returns Strict VBox status code.
15632 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15633 * @param cbInstr The instruction length in bytes.
15634 * @thread EMT(pVCpu)
15635 */
15636VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
15637{
15638 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15639
15640 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15641 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15642 Assert(!pVCpu->iem.s.cActiveMappings);
15643 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15644}
15645
15646
15647/**
15648 * Interface for HM and EM to emulate the INVLPGA instruction.
15649 *
15650 * @returns Strict VBox status code.
15651 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15652 * @param cbInstr The instruction length in bytes.
15653 * @thread EMT(pVCpu)
15654 */
15655VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
15656{
15657 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15658
15659 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15660 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15661 Assert(!pVCpu->iem.s.cActiveMappings);
15662 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15663}
15664
15665
15666/**
15667 * Interface for HM and EM to emulate the VMRUN instruction.
15668 *
15669 * @returns Strict VBox status code.
15670 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15671 * @param cbInstr The instruction length in bytes.
15672 * @thread EMT(pVCpu)
15673 */
15674VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
15675{
15676 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15677 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15678
15679 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15680 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15681 Assert(!pVCpu->iem.s.cActiveMappings);
15682 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15683}
15684
15685
15686/**
15687 * Interface for HM and EM to emulate \#VMEXIT.
15688 *
15689 * @returns Strict VBox status code.
15690 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15691 * @param uExitCode The exit code.
15692 * @param uExitInfo1 The exit info. 1 field.
15693 * @param uExitInfo2 The exit info. 2 field.
15694 * @thread EMT(pVCpu)
15695 */
15696VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15697{
15698 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15699 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15700 if (pVCpu->iem.s.cActiveMappings)
15701 iemMemRollback(pVCpu);
15702 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15703}
15704
15705#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15706
15707#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15708
15709/**
15710 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
15711 *
15712 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
15713 * are performed. Bounds checks are done in strict builds only.
15714 *
15715 * @param pVmcs Pointer to the virtual VMCS.
15716 * @param u64VmcsField The VMCS field.
15717 * @param pu64Dst Where to store the VMCS value.
15718 *
15719 * @remarks May be called with interrupts disabled.
15720 * @todo This should probably be moved to CPUM someday.
15721 */
15722VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
15723{
15724 AssertPtr(pVmcs);
15725 AssertPtr(pu64Dst);
15726 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
15727}
15728
15729
15730/**
15731 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
15732 *
15733 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
15734 * are performed. Bounds checks are done in strict builds only.
15735 *
15736 * @param pVmcs Pointer to the virtual VMCS.
15737 * @param u64VmcsField The VMCS field.
15738 * @param u64Val The value to write.
15739 *
15740 * @remarks May be called with interrupts disabled.
15741 * @todo This should probably be moved to CPUM someday.
15742 */
15743VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
15744{
15745 AssertPtr(pVmcs);
15746 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
15747}
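
/*
 * Illustrative sketch, not built: reading and updating a nested-guest VMCS
 * field with the two helpers above.  VMX_VMCS_GUEST_RIP is used on the
 * assumption that the usual hm_vmx.h field encodings are in scope; pVmcs is
 * assumed to point at the current virtual VMCS.
 */
#if 0
static void exampleAdvanceNestedGuestRip(PVMXVVMCS pVmcs, uint8_t cbInstr)
{
    uint64_t u64GuestRip;
    IEMReadVmxVmcsField(pVmcs, VMX_VMCS_GUEST_RIP, &u64GuestRip);
    IEMWriteVmxVmcsField(pVmcs, VMX_VMCS_GUEST_RIP, u64GuestRip + cbInstr);
}
#endif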
15748
15749
15750/**
15751 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15752 *
15753 * @returns Strict VBox status code.
15754 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15755 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15756 * the x2APIC device.
15757 * @retval VERR_OUT_RANGE if the caller must raise \#GP(0).
15758 *
15759 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15760 * @param idMsr The MSR being read or written.
15761 * @param pu64Value Pointer to the value being written or where to store the
15762 * value being read.
15763 * @param fWrite Whether this is an MSR write or read access.
15764 * @thread EMT(pVCpu)
15765 */
15766VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15767{
15768 Assert(pu64Value);
15769
15770 VBOXSTRICTRC rcStrict;
15771 if (fWrite)
15772 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15773 else
15774 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15775 Assert(!pVCpu->iem.s.cActiveMappings);
15776 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15777
15778}
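
/*
 * Illustrative sketch, not built: dispatching on the status codes documented
 * for the x2APIC MSR virtualization interface above.  What a real caller does
 * in each case (forwarding to the APIC device, raising #GP(0)) is only hinted
 * at in the log statements; the wrapper is hypothetical.
 */
#if 0
static VBOXSTRICTRC exampleVirtualizeX2ApicMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value)
{
    VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, pu64Value, false /*fWrite*/);
    if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
        LogFlow(("x2apic read %#x fully virtualized: %#RX64\n", idMsr, *pu64Value));
    else if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
        LogFlow(("x2apic read %#x not virtualized, forward to the x2APIC device\n", idMsr));
    else
        LogFlow(("x2apic read %#x out of range, caller should raise #GP(0)\n", idMsr));
    return rcStrict;
}
#endif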
15779
15780
15781/**
15782 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15783 *
15784 * @returns Strict VBox status code.
15785 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15786 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15787 *
15788 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15789 * @param pExitInfo Pointer to the VM-exit information.
15790 * @param pExitEventInfo Pointer to the VM-exit event information.
15791 * @thread EMT(pVCpu)
15792 */
15793VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15794{
15795 Assert(pExitInfo);
15796 Assert(pExitEventInfo);
15797 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15798 Assert(!pVCpu->iem.s.cActiveMappings);
15799 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15800
15801}
15802
15803
15804/**
15805 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15806 * VM-exit.
15807 *
15808 * @returns Strict VBox status code.
15809 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15810 * @thread EMT(pVCpu)
15811 */
15812VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu)
15813{
15814 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15815 Assert(!pVCpu->iem.s.cActiveMappings);
15816 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15817}
15818
15819
15820/**
15821 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15822 *
15823 * @returns Strict VBox status code.
15824 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15825 * @thread EMT(pVCpu)
15826 */
15827VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
15828{
15829 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15830 Assert(!pVCpu->iem.s.cActiveMappings);
15831 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15832}
15833
15834
15835/**
15836 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15837 *
15838 * @returns Strict VBox status code.
15839 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15840 * @param uVector The external interrupt vector (pass 0 if the external
15841 * interrupt is still pending).
15842 * @param fIntPending Whether the external interrupt is pending or
15843 * acknowledged in the interrupt controller.
15844 * @thread EMT(pVCpu)
15845 */
15846VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
15847{
15848 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15849 Assert(!pVCpu->iem.s.cActiveMappings);
15850 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15851}
15852
15853
15854/**
15855 * Interface for HM and EM to emulate VM-exit due to exceptions.
15856 *
15857 * Exceptions here include NMIs, software exceptions (those generated by INT3 or
15858 * INTO) and privileged software exceptions (those generated by INT1/ICEBP).
15859 *
15860 * @returns Strict VBox status code.
15861 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15862 * @param pExitInfo Pointer to the VM-exit information.
15863 * @param pExitEventInfo Pointer to the VM-exit event information.
15864 * @thread EMT(pVCpu)
15865 */
15866VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15867{
15868 Assert(pExitInfo);
15869 Assert(pExitEventInfo);
15870 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15871 Assert(!pVCpu->iem.s.cActiveMappings);
15872 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15873}
15874
15875
15876/**
15877 * Interface for HM and EM to emulate VM-exit due to NMIs.
15878 *
15879 * @returns Strict VBox status code.
15880 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15881 * @thread EMT(pVCpu)
15882 */
15883VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu)
15884{
15885 VMXVEXITINFO ExitInfo;
15886 RT_ZERO(ExitInfo);
15887 ExitInfo.uReason = VMX_EXIT_XCPT_OR_NMI;
15888
15889 VMXVEXITEVENTINFO ExitEventInfo;
15890 RT_ZERO(ExitEventInfo);
15891 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
15892 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
15893 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
15894
15895 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
15896 Assert(!pVCpu->iem.s.cActiveMappings);
15897 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15898}
15899
15900
15901/**
15902 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
15903 *
15904 * @returns Strict VBox status code.
15905 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15906 * @thread EMT(pVCpu)
15907 */
15908VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu)
15909{
15910 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
15911 Assert(!pVCpu->iem.s.cActiveMappings);
15912 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15913}
15914
15915
15916/**
15917 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15918 *
15919 * @returns Strict VBox status code.
15920 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15921 * @param uVector The SIPI vector.
15922 * @thread EMT(pVCpu)
15923 */
15924VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector)
15925{
15926 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
15927 Assert(!pVCpu->iem.s.cActiveMappings);
15928 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15929}
15930
15931
15932/**
15933 * Interface for HM and EM to emulate a VM-exit.
15934 *
15935 * If a specialized version of a VM-exit handler exists, that must be used instead.
15936 *
15937 * @returns Strict VBox status code.
15938 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15939 * @param uExitReason The VM-exit reason.
15940 * @param u64ExitQual The Exit qualification.
15941 * @thread EMT(pVCpu)
15942 */
15943VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
15944{
15945 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
15946 Assert(!pVCpu->iem.s.cActiveMappings);
15947 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15948}
15949
15950
15951/**
15952 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15953 *
15954 * This is meant to be used for those instructions for which VMX provides
15955 * additional decoding information beyond just the instruction length.
15956 *
15957 * @returns Strict VBox status code.
15958 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15959 * @param pExitInfo Pointer to the VM-exit information.
15960 * @thread EMT(pVCpu)
15961 */
15962VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15963{
15964 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
15965 Assert(!pVCpu->iem.s.cActiveMappings);
15966 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15967}
15968
15969
15970/**
15971 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15972 *
15973 * This is meant to be used for those instructions for which VMX provides
15974 * only the instruction length.
15975 *
15976 * @returns Strict VBox status code.
15977 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15978 * @param uExitReason The VM-exit reason.
15979 * @param cbInstr The instruction length in bytes.
15980 * @thread EMT(pVCpu)
15981 */
15982VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
15983{
15984 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
15985 Assert(!pVCpu->iem.s.cActiveMappings);
15986 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15987}
15988
15989
15990/**
15991 * Interface for HM and EM to emulate a trap-like VM-exit (MTF, APIC-write,
15992 * Virtualized-EOI, TPR-below threshold).
15993 *
15994 * @returns Strict VBox status code.
15995 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15996 * @param pExitInfo Pointer to the VM-exit information.
15997 * @thread EMT(pVCpu)
15998 */
15999VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16000{
16001 Assert(pExitInfo);
16002 VBOXSTRICTRC rcStrict = iemVmxVmexitTrapLikeWithInfo(pVCpu, pExitInfo);
16003 Assert(!pVCpu->iem.s.cActiveMappings);
16004 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16005}
16006
16007
16008/**
16009 * Interface for HM and EM to emulate a VM-exit due to a task switch.
16010 *
16011 * @returns Strict VBox status code.
16012 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16013 * @param pExitInfo Pointer to the VM-exit information.
16014 * @param pExitEventInfo Pointer to the VM-exit event information.
16015 * @thread EMT(pVCpu)
16016 */
16017VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16018{
16019 Assert(pExitInfo);
16020 Assert(pExitEventInfo);
16021 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
16022 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16023 Assert(!pVCpu->iem.s.cActiveMappings);
16024 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16025}
16026
16027
16028/**
16029 * Interface for HM and EM to emulate the VMREAD instruction.
16030 *
16031 * @returns Strict VBox status code.
16032 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16033 * @param pExitInfo Pointer to the VM-exit information.
16034 * @thread EMT(pVCpu)
16035 */
16036VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16037{
16038 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16039 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16040 Assert(pExitInfo);
16041
16042 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16043
16044 VBOXSTRICTRC rcStrict;
16045 uint8_t const cbInstr = pExitInfo->cbInstr;
16046 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
16047 uint64_t const u64FieldEnc = fIs64BitMode
16048 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16049 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16050 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16051 {
16052 if (fIs64BitMode)
16053 {
16054 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16055 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
16056 }
16057 else
16058 {
16059 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16060 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
16061 }
16062 }
16063 else
16064 {
16065 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
16066 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16067 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
16068 }
16069 Assert(!pVCpu->iem.s.cActiveMappings);
16070 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16071}
16072
16073
16074/**
16075 * Interface for HM and EM to emulate the VMWRITE instruction.
16076 *
16077 * @returns Strict VBox status code.
16078 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16079 * @param pExitInfo Pointer to the VM-exit information.
16080 * @thread EMT(pVCpu)
16081 */
16082VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16083{
16084 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16085 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16086 Assert(pExitInfo);
16087
16088 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16089
16090 uint64_t u64Val;
16091 uint8_t iEffSeg;
16092 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16093 {
16094 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16095 iEffSeg = UINT8_MAX;
16096 }
16097 else
16098 {
16099 u64Val = pExitInfo->GCPtrEffAddr;
16100 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16101 }
16102 uint8_t const cbInstr = pExitInfo->cbInstr;
16103 uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16104 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16105 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16106 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
16107 Assert(!pVCpu->iem.s.cActiveMappings);
16108 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16109}
16110
16111
16112/**
16113 * Interface for HM and EM to emulate the VMPTRLD instruction.
16114 *
16115 * @returns Strict VBox status code.
16116 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16117 * @param pExitInfo Pointer to the VM-exit information.
16118 * @thread EMT(pVCpu)
16119 */
16120VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16121{
16122 Assert(pExitInfo);
16123 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16124 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16125
16126 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16127
16128 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16129 uint8_t const cbInstr = pExitInfo->cbInstr;
16130 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16131 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16132 Assert(!pVCpu->iem.s.cActiveMappings);
16133 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16134}
16135
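/*
 * Illustrative sketch only (not part of the original source): the same
 * VMXVEXITINFO shape feeds IEMExecDecodedVmptrld(), IEMExecDecodedVmptrst(),
 * IEMExecDecodedVmclear() and IEMExecDecodedVmxon() -- a memory operand
 * described by InstrInfo.VmxXsave.iSegReg and GCPtrEffAddr.  The names
 * cbExitInstr, uExitInstrInfo and GCPtrOperand below stand for values the
 * caller would read out of the hardware VMCS and are hypothetical.
 *
 *     VMXVEXITINFO ExitInfo;
 *     RT_ZERO(ExitInfo);
 *     ExitInfo.uReason      = VMX_EXIT_VMPTRLD;
 *     ExitInfo.cbInstr      = cbExitInstr;      // VM-exit instruction length from the VMCS
 *     ExitInfo.InstrInfo.u  = uExitInstrInfo;   // instruction-information field (raw member name assumed)
 *     ExitInfo.GCPtrEffAddr = GCPtrOperand;     // guest-linear address of the 64-bit VMCS-pointer operand
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
 */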
16136
16137/**
16138 * Interface for HM and EM to emulate the VMPTRST instruction.
16139 *
16140 * @returns Strict VBox status code.
16141 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16142 * @param pExitInfo Pointer to the VM-exit information.
16143 * @thread EMT(pVCpu)
16144 */
16145VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16146{
16147 Assert(pExitInfo);
16148 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16149 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16150
16151 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16152
16153 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16154 uint8_t const cbInstr = pExitInfo->cbInstr;
16155 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16156 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16157 Assert(!pVCpu->iem.s.cActiveMappings);
16158 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16159}
16160
16161
16162/**
16163 * Interface for HM and EM to emulate the VMCLEAR instruction.
16164 *
16165 * @returns Strict VBox status code.
16166 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16167 * @param pExitInfo Pointer to the VM-exit information.
16168 * @thread EMT(pVCpu)
16169 */
16170VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16171{
16172 Assert(pExitInfo);
16173 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16174 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16175
16176 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16177
16178 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16179 uint8_t const cbInstr = pExitInfo->cbInstr;
16180 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16181 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16182 Assert(!pVCpu->iem.s.cActiveMappings);
16183 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16184}
16185
16186
16187/**
16188 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16189 *
16190 * @returns Strict VBox status code.
16191 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16192 * @param cbInstr The instruction length in bytes.
16193 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16194 * VMXINSTRID_VMRESUME).
16195 * @thread EMT(pVCpu)
16196 */
16197VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16198{
16199 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16200 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16201
16202 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16203 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16204 Assert(!pVCpu->iem.s.cActiveMappings);
16205 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16206}
16207
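/*
 * Illustrative note (not part of the original source): VMLAUNCH and VMRESUME
 * are both 3-byte instructions (0f 01 c2 and 0f 01 c3), so a caller typically
 * passes the VM-exit instruction length straight through together with the
 * matching instruction ID; cbExitInstr below is a hypothetical name for that
 * length.
 *
 *     VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, cbExitInstr, VMXINSTRID_VMRESUME);
 */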
16208
16209/**
16210 * Interface for HM and EM to emulate the VMXON instruction.
16211 *
16212 * @returns Strict VBox status code.
16213 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16214 * @param pExitInfo Pointer to the VM-exit information.
16215 * @thread EMT(pVCpu)
16216 */
16217VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16218{
16219 Assert(pExitInfo);
16220 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16221 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16222
16223 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16224
16225 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16226 uint8_t const cbInstr = pExitInfo->cbInstr;
16227 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16228 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16229 Assert(!pVCpu->iem.s.cActiveMappings);
16230 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16231}
16232
16233
16234/**
16235 * Interface for HM and EM to emulate the VMXOFF instruction.
16236 *
16237 * @returns Strict VBox status code.
16238 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16239 * @param cbInstr The instruction length in bytes.
16240 * @thread EMT(pVCpu)
16241 */
16242VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr)
16243{
16244 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16245 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16246
16247 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16248 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16249 Assert(!pVCpu->iem.s.cActiveMappings);
16250 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16251}
16252
16253
16254/**
16255 * Interface for HM and EM to emulate the INVVPID instruction.
16256 *
16257 * @returns Strict VBox status code.
16258 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16259 * @param pExitInfo Pointer to the VM-exit information.
16260 * @thread EMT(pVCpu)
16261 */
16262VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16263{
16264 Assert(pExitInfo);
16265 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
16266 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16267
16268 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16269
16270 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
16271 uint8_t const cbInstr = pExitInfo->cbInstr;
16272 RTGCPTR const GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
16273 uint64_t const u64InvvpidType = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16274 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
16275 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
16276 VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
16277 Assert(!pVCpu->iem.s.cActiveMappings);
16278 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16279}
16280
16281
16282/**
16283 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
16284 *
16285 * @remarks The @a pvUser argument is currently unused.
16286 */
16287PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
16288 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
16289 PGMACCESSORIGIN enmOrigin, void *pvUser)
16290{
16291 RT_NOREF3(pvPhys, enmOrigin, pvUser);
16292
16293 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
16294 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16295 {
16296 Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16297 Assert(CPUMGetGuestVmxApicAccessPageAddr(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16298
16299 /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
16300 * Currently they will go through as read accesses. */
16301 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
16302 uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
16303 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
16304 if (RT_FAILURE(rcStrict))
16305 return rcStrict;
16306
16307 /* Any access on this APIC-access page has been handled; the caller should not carry out the access. */
16308 return VINF_SUCCESS;
16309 }
16310
16311 Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
16312 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16313 if (RT_FAILURE(rc))
16314 return rc;
16315
16316 /* Instruct the caller of this handler to perform the read/write as normal memory. */
16317 return VINF_PGM_HANDLER_DO_DEFAULT;
16318}
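
/*
 * Illustrative note (not part of the original source): the handler splits the
 * faulting physical address into the registered page base and the offset that
 * is handed to iemVmxVirtApicAccessMem().  For example, with the APIC-access
 * page at the conventional 0xfee00000, a write hitting GCPhysFault = 0xfee00300
 * yields GCPhysAccessBase = 0xfee00000 and offAccess = 0x300, i.e. the xAPIC
 * ICR-low register offset.
 */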
16319
16320#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16321
16322#ifdef IN_RING3
16323
16324/**
16325 * Handles the unlikely and probably fatal merge cases.
16326 *
16327 * @returns Merged status code.
16328 * @param rcStrict Current EM status code.
16329 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16330 * with @a rcStrict.
16331 * @param iMemMap The memory mapping index. For error reporting only.
16332 * @param pVCpu The cross context virtual CPU structure of the calling
16333 * thread, for error reporting only.
16334 */
16335DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16336 unsigned iMemMap, PVMCPUCC pVCpu)
16337{
16338 if (RT_FAILURE_NP(rcStrict))
16339 return rcStrict;
16340
16341 if (RT_FAILURE_NP(rcStrictCommit))
16342 return rcStrictCommit;
16343
16344 if (rcStrict == rcStrictCommit)
16345 return rcStrictCommit;
16346
16347 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16348 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16349 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16350 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16351 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16352 return VERR_IOM_FF_STATUS_IPE;
16353}
16354
16355
16356/**
16357 * Helper for IOMR3ProcessForceFlag.
16358 *
16359 * @returns Merged status code.
16360 * @param rcStrict Current EM status code.
16361 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16362 * with @a rcStrict.
16363 * @param iMemMap The memory mapping index. For error reporting only.
16364 * @param pVCpu The cross context virtual CPU structure of the calling
16365 * thread, for error reporting only.
16366 */
16367DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
16368{
16369 /* Simple. */
16370 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16371 return rcStrictCommit;
16372
16373 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16374 return rcStrict;
16375
16376 /* EM scheduling status codes: the lower (more important) of the two takes precedence. */
16377 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16378 && rcStrict <= VINF_EM_LAST))
16379 {
16380 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16381 && rcStrictCommit <= VINF_EM_LAST))
16382 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16383 }
16384
16385 /* Unlikely */
16386 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16387}
16388
16389
16390/**
16391 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16392 *
16393 * @returns The merged status of @a rcStrict and what the commit operation returned.
16394 * @param pVM The cross context VM structure.
16395 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16396 * @param rcStrict The status code returned by ring-0 or raw-mode.
16397 */
16398VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
16399{
16400 /*
16401 * Reset the pending commit.
16402 */
16403 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16404 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16405 ("%#x %#x %#x\n",
16406 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16407 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16408
16409 /*
16410 * Commit the pending bounce buffers (usually just one).
16411 */
16412 unsigned cBufs = 0;
16413 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16414 while (iMemMap-- > 0)
16415 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16416 {
16417 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16418 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16419 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16420
16421 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16422 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16423 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16424
16425 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16426 {
16427 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16428 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16429 pbBuf,
16430 cbFirst,
16431 PGMACCESSORIGIN_IEM);
16432 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16433 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16434 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16435 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16436 }
16437
16438 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16439 {
16440 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16441 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16442 pbBuf + cbFirst,
16443 cbSecond,
16444 PGMACCESSORIGIN_IEM);
16445 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16446 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16447 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16448 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16449 }
16450 cBufs++;
16451 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16452 }
16453
16454 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16455 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16456 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16457 pVCpu->iem.s.cActiveMappings = 0;
16458 return rcStrict;
16459}
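
/*
 * Illustrative sketch only (not part of the original source): the ring-3
 * force-flag processing loop that owns the EM status code is expected to call
 * this roughly as follows.  The surrounding loop is hypothetical; the check
 * uses the same VMCPU_FF_IEM flag that is cleared above.
 *
 *     if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *         rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 */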
16460
16461#endif /* IN_RING3 */
16462