VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp@ 92489

Last change on this file since 92489 was 92426, checked in by vboxsync, 3 years ago

VMM: Nested VMX: bugref:10092 Refactor PGMGstGetPage and related API and functions to pass more info back to callers on page walk failures.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 652.0 KB
1/* $Id: IEMAll.cpp 92426 2021-11-15 13:25:47Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there until we
53 * leave IEM, because the calling conventions have declared an all-year open
54 * season on much of the FPU state. For instance, an innocent-looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
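
/* Illustrative sketch (not part of the original source): roughly how the log
 * levels listed above map onto the VBox/log.h macros when used from IEM code.
 * The instruction, variable names and values below are hypothetical.
 *
 *     Log(("iemCImpl_example: raising #GP(0), uSel=%#x\n", uSel));    // level 1: major event
 *     LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64\n", uCs, uRip));      // enter/exit flow
 *     Log4(("decode %04x:%08RX64: mov eax, ebx\n", uCs, uRip));       // decoded mnemonic w/ EIP
 *     Log8(("memwrite %RGv LB %#x\n", GCPtrMem, cbMem));              // memory write
 */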
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#include "IEMInternal.h"
111#include <VBox/vmm/vmcc.h>
112#include <VBox/log.h>
113#include <VBox/err.h>
114#include <VBox/param.h>
115#include <VBox/dis.h>
116#include <VBox/disopcode.h>
117#include <iprt/asm-math.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123/*********************************************************************************************************************************
124* Structures and Typedefs *
125*********************************************************************************************************************************/
126/** @typedef PFNIEMOP
127 * Pointer to an opcode decoder function.
128 */
129
130/** @def FNIEMOP_DEF
131 * Define an opcode decoder function.
132 *
133 * We're using macros for this so that adding and removing parameters as well as
134 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
135 *
136 * @param a_Name The function name.
137 */
138
139/** @typedef PFNIEMOPRM
140 * Pointer to an opcode decoder function with RM byte.
141 */
142
143/** @def FNIEMOPRM_DEF
144 * Define an opcode decoder function with RM byte.
145 *
146 * We're using macros for this so that adding and removing parameters as well as
147 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
148 *
149 * @param a_Name The function name.
150 */
151
152#if defined(__GNUC__) && defined(RT_ARCH_X86)
153typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
154typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
164typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
171
172#elif defined(__GNUC__)
173typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
174typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
181
182#else
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
191
192#endif
193#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
194
195
196/**
197 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
198 */
199typedef union IEMSELDESC
200{
201 /** The legacy view. */
202 X86DESC Legacy;
203 /** The long mode view. */
204 X86DESC64 Long;
205} IEMSELDESC;
206/** Pointer to a selector descriptor table entry. */
207typedef IEMSELDESC *PIEMSELDESC;
208
209/**
210 * CPU exception classes.
211 */
212typedef enum IEMXCPTCLASS
213{
214 IEMXCPTCLASS_BENIGN,
215 IEMXCPTCLASS_CONTRIBUTORY,
216 IEMXCPTCLASS_PAGE_FAULT,
217 IEMXCPTCLASS_DOUBLE_FAULT
218} IEMXCPTCLASS;
219
220
221/*********************************************************************************************************************************
222* Defined Constants And Macros *
223*********************************************************************************************************************************/
224/** @def IEM_WITH_SETJMP
225 * Enables alternative status code handling using setjmps.
226 *
227 * This adds a bit of expense via the setjmp() call since it saves all the
228 * non-volatile registers. However, it eliminates return code checks and allows
229 * for more optimal return value passing (return regs instead of stack buffer).
230 */
231#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
232# define IEM_WITH_SETJMP
233#endif
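
/* Illustrative sketch (not part of the original source): the difference the
 * setjmp mode makes to callers of the data fetch helpers. The helper names
 * follow the pattern used elsewhere in IEM, but the snippet itself is only a
 * hedged example.
 *
 *     // Status code mode: every fetch must be checked and propagated.
 *     uint16_t u16Value;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataU16(pVCpu, &u16Value, iSegReg, GCPtrMem);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *
 *     // Setjmp mode: failures longjmp back to the outermost IEM caller, so
 *     // the fetched value can be used directly without status plumbing.
 *     uint16_t const u16Value2 = iemMemFetchDataU16Jmp(pVCpu, iSegReg, GCPtrMem);
 */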
234
235/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
236 * due to GCC lacking knowledge about the value range of a switch. */
237#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
238
239/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
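
/* Illustrative sketch (not part of the original source): typical use of
 * IEM_NOT_REACHED_DEFAULT_CASE_RET in an operand-size switch; the handler
 * names are hypothetical.
 *
 *     switch (pVCpu->iem.s.enmEffOpSize)
 *     {
 *         case IEMMODE_16BIT: return FNIEMOP_CALL(iemOp_example_op16);
 *         case IEMMODE_32BIT: return FNIEMOP_CALL(iemOp_example_op32);
 *         case IEMMODE_64BIT: return FNIEMOP_CALL(iemOp_example_op64);
 *         IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *     }
 */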
241
242/**
243 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
244 * occasion.
245 */
246#ifdef LOG_ENABLED
247# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
248 do { \
249 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
250 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
251 } while (0)
252#else
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
255#endif
256
257/**
258 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
259 * occasion using the supplied logger statement.
260 *
261 * @param a_LoggerArgs What to log on failure.
262 */
263#ifdef LOG_ENABLED
264# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
265 do { \
266 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
267 /*LogFunc(a_LoggerArgs);*/ \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
269 } while (0)
270#else
271# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
272 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
273#endif
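
/* Illustrative sketch (not part of the original source): how an instruction
 * implementation might bail out of an unimplemented corner case; the condition
 * and message are hypothetical.
 *
 *     if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_SMAP)
 *         IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("SMAP interaction not handled here yet\n"));
 */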
274
275/**
276 * Call an opcode decoder function.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF.
280 */
281#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
282
283/**
284 * Call a common opcode decoder function taking one extra argument.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_1.
288 */
289#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
290
291/**
292 * Call a common opcode decoder function taking two extra arguments.
293 *
294 * We're using macros for this so that adding and removing parameters can be
295 * done as we please. See FNIEMOP_DEF_2.
296 */
297#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
298
299/**
300 * Check if we're currently executing in real or virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The IEM state of the current CPU.
304 */
305#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in virtual 8086 mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in long mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Check if we're currently executing in a 64-bit code segment.
325 *
326 * @returns @c true if it is, @c false if not.
327 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
328 */
329#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
330
331/**
332 * Check if we're currently executing in real mode.
333 *
334 * @returns @c true if it is, @c false if not.
335 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
336 */
337#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
338
339/**
340 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
341 * @returns PCCPUMFEATURES
342 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
343 */
344#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
345
346/**
347 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
348 * @returns PCCPUMFEATURES
349 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
350 */
351#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
352
353/**
354 * Evaluates to true if we're presenting an Intel CPU to the guest.
355 */
356#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
357
358/**
359 * Evaluates to true if we're presenting an AMD CPU to the guest.
360 */
361#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
362
363/**
364 * Check if the address is canonical.
365 */
366#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
367
368/**
369 * Gets the effective VEX.VVVV value.
370 *
371 * The 4th bit is ignored when not executing 64-bit code.
372 * @returns effective V-register value.
373 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
374 */
375#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
376 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
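
/* Example (not part of the original source): with uVex3rdReg = 14 the macro
 * above yields 14 (e.g. XMM14) in 64-bit mode, but 6 in 16-bit/32-bit code,
 * where the 4th bit is ignored. */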
377
378/** @def IEM_USE_UNALIGNED_DATA_ACCESS
379 * Use unaligned accesses instead of elaborate byte assembly. */
380#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
381# define IEM_USE_UNALIGNED_DATA_ACCESS
382#endif
383
384#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
385
386/**
387 * Check if the guest has entered VMX root operation.
388 */
389# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
390
391/**
392 * Check if the guest has entered VMX non-root operation.
393 */
394# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if the nested-guest has the given Pin-based VM-execution control set.
398 */
399# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
400 (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
401
402/**
403 * Check if the nested-guest has the given Processor-based VM-execution control set.
404 */
405#define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
406 (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
407
408/**
409 * Check if the nested-guest has the given Secondary Processor-based VM-execution
410 * control set.
411 */
412#define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
413 (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
414
415/**
416 * Invokes the VMX VM-exit handler for an instruction intercept.
417 */
418# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
419 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept where the
423 * instruction provides additional VM-exit information.
424 */
425# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
426 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
427
428/**
429 * Invokes the VMX VM-exit handler for a task switch.
430 */
431# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
432 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for MWAIT.
436 */
437# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
438 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for a triple fault.
442 */
443# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
444 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
445
446#else
447# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
448# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
449# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) (false)
450# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) (false)
451# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) (false)
452# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
453# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
454# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
455# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
456# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
457
458#endif
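
/* Illustrative sketch (not part of the original source): typical use of the
 * macros above from an instruction implementation to perform an intercept
 * VM-exit. The RDTSC control and exit-reason names are the usual VBox ones,
 * but the snippet is only a hedged example.
 *
 *     if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
 *         && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
 *         IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, cbInstr);
 */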
459
460#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
461/**
462 * Check if an SVM control/instruction intercept is set.
463 */
464# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
465 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
466
467/**
468 * Check if an SVM read CRx intercept is set.
469 */
470# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
471 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
472
473/**
474 * Check if an SVM write CRx intercept is set.
475 */
476# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
477 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
478
479/**
480 * Check if an SVM read DRx intercept is set.
481 */
482# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
483 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
484
485/**
486 * Check if an SVM write DRx intercept is set.
487 */
488# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
489 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
490
491/**
492 * Check if an SVM exception intercept is set.
493 */
494# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
495 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
496
497/**
498 * Invokes the SVM \#VMEXIT handler for the nested-guest.
499 */
500# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
501 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
502
503/**
504 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
505 * corresponding decode assist information.
506 */
507# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
508 do \
509 { \
510 uint64_t uExitInfo1; \
511 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
512 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
513 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
514 else \
515 uExitInfo1 = 0; \
516 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
517 } while (0)
518
519/** Checks and handles the SVM nested-guest instruction intercept, updating
520 * the NRIP if needed.
521 */
522# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
523 do \
524 { \
525 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
526 { \
527 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
528 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
529 } \
530 } while (0)
531
532/** Checks and handles SVM nested-guest CR0 read intercept. */
533# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
534 do \
535 { \
536 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
537 { /* probably likely */ } \
538 else \
539 { \
540 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
541 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
542 } \
543 } while (0)
544
545/**
546 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
547 */
548# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
549 do { \
550 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
551 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
552 } while (0)
553
554#else
555# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
556# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
557# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
558# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
559# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
560# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
561# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
562# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
563# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
564# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
565# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
566
567#endif
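
/* Illustrative sketch (not part of the original source): typical use of the
 * instruction intercept helper above; WBINVD is picked arbitrarily and the
 * intercept/exit-code names are the usual VBox SVM ones.
 *
 *     // #VMEXIT with SVM_EXIT_WBINVD if the intercept is set; the trailing
 *     // zeros are uExitInfo1 and uExitInfo2.
 *     IEM_SVM_CHECK_INSTR_INTERCEPT(pVCpu, SVM_CTRL_INTERCEPT_WBINVD,
 *                                   SVM_EXIT_WBINVD, 0, 0);
 */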
568
569
570/*********************************************************************************************************************************
571* Global Variables *
572*********************************************************************************************************************************/
573extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
574
575
576/** Function table for the ADD instruction. */
577IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
578{
579 iemAImpl_add_u8, iemAImpl_add_u8_locked,
580 iemAImpl_add_u16, iemAImpl_add_u16_locked,
581 iemAImpl_add_u32, iemAImpl_add_u32_locked,
582 iemAImpl_add_u64, iemAImpl_add_u64_locked
583};
584
585/** Function table for the ADC instruction. */
586IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
587{
588 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
589 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
590 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
591 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
592};
593
594/** Function table for the SUB instruction. */
595IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
596{
597 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
598 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
599 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
600 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
601};
602
603/** Function table for the SBB instruction. */
604IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
605{
606 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
607 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
608 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
609 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
610};
611
612/** Function table for the OR instruction. */
613IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
614{
615 iemAImpl_or_u8, iemAImpl_or_u8_locked,
616 iemAImpl_or_u16, iemAImpl_or_u16_locked,
617 iemAImpl_or_u32, iemAImpl_or_u32_locked,
618 iemAImpl_or_u64, iemAImpl_or_u64_locked
619};
620
621/** Function table for the XOR instruction. */
622IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
623{
624 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
625 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
626 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
627 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
628};
629
630/** Function table for the AND instruction. */
631IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
632{
633 iemAImpl_and_u8, iemAImpl_and_u8_locked,
634 iemAImpl_and_u16, iemAImpl_and_u16_locked,
635 iemAImpl_and_u32, iemAImpl_and_u32_locked,
636 iemAImpl_and_u64, iemAImpl_and_u64_locked
637};
638
639/** Function table for the CMP instruction.
640 * @remarks Making operand order ASSUMPTIONS.
641 */
642IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
643{
644 iemAImpl_cmp_u8, NULL,
645 iemAImpl_cmp_u16, NULL,
646 iemAImpl_cmp_u32, NULL,
647 iemAImpl_cmp_u64, NULL
648};
649
650/** Function table for the TEST instruction.
651 * @remarks Making operand order ASSUMPTIONS.
652 */
653IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
654{
655 iemAImpl_test_u8, NULL,
656 iemAImpl_test_u16, NULL,
657 iemAImpl_test_u32, NULL,
658 iemAImpl_test_u64, NULL
659};
660
661/** Function table for the BT instruction. */
662IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
663{
664 NULL, NULL,
665 iemAImpl_bt_u16, NULL,
666 iemAImpl_bt_u32, NULL,
667 iemAImpl_bt_u64, NULL
668};
669
670/** Function table for the BTC instruction. */
671IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
672{
673 NULL, NULL,
674 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
675 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
676 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
677};
678
679/** Function table for the BTR instruction. */
680IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
681{
682 NULL, NULL,
683 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
684 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
685 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
686};
687
688/** Function table for the BTS instruction. */
689IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
690{
691 NULL, NULL,
692 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
693 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
694 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
695};
696
697/** Function table for the BSF instruction. */
698IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
699{
700 NULL, NULL,
701 iemAImpl_bsf_u16, NULL,
702 iemAImpl_bsf_u32, NULL,
703 iemAImpl_bsf_u64, NULL
704};
705
706/** Function table for the BSR instruction. */
707IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
708{
709 NULL, NULL,
710 iemAImpl_bsr_u16, NULL,
711 iemAImpl_bsr_u32, NULL,
712 iemAImpl_bsr_u64, NULL
713};
714
715/** Function table for the IMUL instruction. */
716IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
717{
718 NULL, NULL,
719 iemAImpl_imul_two_u16, NULL,
720 iemAImpl_imul_two_u32, NULL,
721 iemAImpl_imul_two_u64, NULL
722};
723
724/** Group 1 /r lookup table. */
725IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
726{
727 &g_iemAImpl_add,
728 &g_iemAImpl_or,
729 &g_iemAImpl_adc,
730 &g_iemAImpl_sbb,
731 &g_iemAImpl_and,
732 &g_iemAImpl_sub,
733 &g_iemAImpl_xor,
734 &g_iemAImpl_cmp
735};
736
737/** Function table for the INC instruction. */
738IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
739{
740 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
741 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
742 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
743 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
744};
745
746/** Function table for the DEC instruction. */
747IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
748{
749 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
750 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
751 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
752 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
753};
754
755/** Function table for the NEG instruction. */
756IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
757{
758 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
759 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
760 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
761 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
762};
763
764/** Function table for the NOT instruction. */
765IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
766{
767 iemAImpl_not_u8, iemAImpl_not_u8_locked,
768 iemAImpl_not_u16, iemAImpl_not_u16_locked,
769 iemAImpl_not_u32, iemAImpl_not_u32_locked,
770 iemAImpl_not_u64, iemAImpl_not_u64_locked
771};
772
773
774/** Function table for the ROL instruction. */
775IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
776{
777 iemAImpl_rol_u8,
778 iemAImpl_rol_u16,
779 iemAImpl_rol_u32,
780 iemAImpl_rol_u64
781};
782
783/** Function table for the ROR instruction. */
784IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
785{
786 iemAImpl_ror_u8,
787 iemAImpl_ror_u16,
788 iemAImpl_ror_u32,
789 iemAImpl_ror_u64
790};
791
792/** Function table for the RCL instruction. */
793IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
794{
795 iemAImpl_rcl_u8,
796 iemAImpl_rcl_u16,
797 iemAImpl_rcl_u32,
798 iemAImpl_rcl_u64
799};
800
801/** Function table for the RCR instruction. */
802IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
803{
804 iemAImpl_rcr_u8,
805 iemAImpl_rcr_u16,
806 iemAImpl_rcr_u32,
807 iemAImpl_rcr_u64
808};
809
810/** Function table for the SHL instruction. */
811IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
812{
813 iemAImpl_shl_u8,
814 iemAImpl_shl_u16,
815 iemAImpl_shl_u32,
816 iemAImpl_shl_u64
817};
818
819/** Function table for the SHR instruction. */
820IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
821{
822 iemAImpl_shr_u8,
823 iemAImpl_shr_u16,
824 iemAImpl_shr_u32,
825 iemAImpl_shr_u64
826};
827
828/** Function table for the SAR instruction. */
829IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
830{
831 iemAImpl_sar_u8,
832 iemAImpl_sar_u16,
833 iemAImpl_sar_u32,
834 iemAImpl_sar_u64
835};
836
837
838/** Function table for the MUL instruction. */
839IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
840{
841 iemAImpl_mul_u8,
842 iemAImpl_mul_u16,
843 iemAImpl_mul_u32,
844 iemAImpl_mul_u64
845};
846
847/** Function table for the IMUL instruction working implicitly on rAX. */
848IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
849{
850 iemAImpl_imul_u8,
851 iemAImpl_imul_u16,
852 iemAImpl_imul_u32,
853 iemAImpl_imul_u64
854};
855
856/** Function table for the DIV instruction. */
857IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
858{
859 iemAImpl_div_u8,
860 iemAImpl_div_u16,
861 iemAImpl_div_u32,
862 iemAImpl_div_u64
863};
864
865/** Function table for the IDIV instruction. */
866IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
867{
868 iemAImpl_idiv_u8,
869 iemAImpl_idiv_u16,
870 iemAImpl_idiv_u32,
871 iemAImpl_idiv_u64
872};
873
874/** Function table for the SHLD instruction */
875IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
876{
877 iemAImpl_shld_u16,
878 iemAImpl_shld_u32,
879 iemAImpl_shld_u64,
880};
881
882/** Function table for the SHRD instruction */
883IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
884{
885 iemAImpl_shrd_u16,
886 iemAImpl_shrd_u32,
887 iemAImpl_shrd_u64,
888};
889
890
891/** Function table for the PUNPCKLBW instruction */
892IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
893/** Function table for the PUNPCKLWD instruction */
894IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
895/** Function table for the PUNPCKLDQ instruction */
896IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
897/** Function table for the PUNPCKLQDQ instruction */
898IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
899
900/** Function table for the PUNPCKHBW instruction */
901IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
902/** Function table for the PUNPCKHWD instruction */
903IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
904/** Function table for the PUNPCKHDQ instruction */
905IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
906/** Function table for the PUNPCKHQDQ instruction */
907IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
908
909/** Function table for the PXOR instruction */
910IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
911/** Function table for the PCMPEQB instruction */
912IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
913/** Function table for the PCMPEQW instruction */
914IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
915/** Function table for the PCMPEQD instruction */
916IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
917
918
919#if defined(IEM_LOG_MEMORY_WRITES)
920/** What IEM just wrote. */
921uint8_t g_abIemWrote[256];
922/** How much IEM just wrote. */
923size_t g_cbIemWrote;
924#endif
925
926
927/*********************************************************************************************************************************
928* Internal Functions *
929*********************************************************************************************************************************/
930IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr);
931IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu);
932IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu);
933IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel);
934/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
935IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
936IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
937IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
938IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
939IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr);
940IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu);
941IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL uSel);
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
943IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel);
944IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
945IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
946IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu);
947#ifdef IEM_WITH_SETJMP
948DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
949DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu);
950DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
951DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel);
952DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
953#endif
954
955IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
956IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess);
957IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
958IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
959IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
960IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
961IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
962IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
963IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
964IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
965IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
966IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp);
967IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
968IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value);
969IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value);
970IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel);
971DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg);
972DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg);
973
974#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
975IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
976IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
977IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
978IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu);
979IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
980IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value);
981IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value);
982#endif
983
984#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
985IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
986IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
987#endif
988
989
990/**
991 * Sets the pass up status.
992 *
993 * @returns VINF_SUCCESS.
994 * @param pVCpu The cross context virtual CPU structure of the
995 * calling thread.
996 * @param rcPassUp The pass up status. Must be informational.
997 * VINF_SUCCESS is not allowed.
998 */
999IEM_STATIC int iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp)
1000{
1001 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1002
1003 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1004 if (rcOldPassUp == VINF_SUCCESS)
1005 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1006 /* If both are EM scheduling codes, use EM priority rules. */
1007 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1008 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1009 {
1010 if (rcPassUp < rcOldPassUp)
1011 {
1012 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1013 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1014 }
1015 else
1016 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1017 }
1018 /* Override EM scheduling with specific status code. */
1019 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1020 {
1021 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1022 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1023 }
1024 /* Don't override specific status code, first come first served. */
1025 else
1026 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1027 return VINF_SUCCESS;
1028}
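
/* Illustrative sketch (not part of the original source): the typical caller
 * pattern, mirroring the opcode prefetch code further down. Informational
 * statuses from PGM are folded into the pass-up status and execution
 * continues, while real errors are returned directly; GCPhys, pvDst and cbDst
 * are placeholders.
 *
 *     VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pvDst, cbDst, PGMACCESSORIGIN_IEM);
 *     if (rcStrict != VINF_SUCCESS)
 *     {
 *         if (!PGM_PHYS_RW_IS_SUCCESS(rcStrict))
 *             return rcStrict;                            // genuine error
 *         rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); // informational, remember for later
 *     }
 */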
1029
1030
1031/**
1032 * Calculates the CPU mode.
1033 *
1034 * This is mainly for updating IEMCPU::enmCpuMode.
1035 *
1036 * @returns CPU mode.
1037 * @param pVCpu The cross context virtual CPU structure of the
1038 * calling thread.
1039 */
1040DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu)
1041{
1042 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1043 return IEMMODE_64BIT;
1044 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1045 return IEMMODE_32BIT;
1046 return IEMMODE_16BIT;
1047}
1048
1049
1050/**
1051 * Initializes the execution state.
1052 *
1053 * @param pVCpu The cross context virtual CPU structure of the
1054 * calling thread.
1055 * @param fBypassHandlers Whether to bypass access handlers.
1056 *
1057 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1058 * side-effects in strict builds.
1059 */
1060DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers)
1061{
1062 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1063 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1064 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1065 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1066 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1067 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1068 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1069 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1070 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1071 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1072
1073 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1074 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1075#ifdef VBOX_STRICT
1076 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1077 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1078 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1079 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1080 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1081 pVCpu->iem.s.uRexReg = 127;
1082 pVCpu->iem.s.uRexB = 127;
1083 pVCpu->iem.s.offModRm = 127;
1084 pVCpu->iem.s.uRexIndex = 127;
1085 pVCpu->iem.s.iEffSeg = 127;
1086 pVCpu->iem.s.idxPrefix = 127;
1087 pVCpu->iem.s.uVex3rdReg = 127;
1088 pVCpu->iem.s.uVexLength = 127;
1089 pVCpu->iem.s.fEvexStuff = 127;
1090 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1091# ifdef IEM_WITH_CODE_TLB
1092 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1093 pVCpu->iem.s.pbInstrBuf = NULL;
1094 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1095 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1096 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1097 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1098# else
1099 pVCpu->iem.s.offOpcode = 127;
1100 pVCpu->iem.s.cbOpcode = 127;
1101# endif
1102#endif
1103
1104 pVCpu->iem.s.cActiveMappings = 0;
1105 pVCpu->iem.s.iNextMapping = 0;
1106 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1107 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1108#if 0
1109#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1110 if ( CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
1111 && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
1112 {
1113 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1114 Assert(pVmcs);
1115 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
1116 if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
1117 {
1118 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
1119 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
1120 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
1121 AssertRC(rc);
1122 }
1123 }
1124#endif
1125#endif
1126}
1127
1128#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1129/**
1130 * Performs a minimal reinitialization of the execution state.
1131 *
1132 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1133 * 'world-switch' type operations on the CPU. Currently only nested
1134 * hardware-virtualization uses it.
1135 *
1136 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1137 */
1138IEM_STATIC void iemReInitExec(PVMCPUCC pVCpu)
1139{
1140 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1141 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1142
1143 pVCpu->iem.s.uCpl = uCpl;
1144 pVCpu->iem.s.enmCpuMode = enmMode;
1145 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1146 pVCpu->iem.s.enmEffAddrMode = enmMode;
1147 if (enmMode != IEMMODE_64BIT)
1148 {
1149 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1150 pVCpu->iem.s.enmEffOpSize = enmMode;
1151 }
1152 else
1153 {
1154 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1155 pVCpu->iem.s.enmEffOpSize = enmMode;
1156 }
1157 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1158#ifndef IEM_WITH_CODE_TLB
1159 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1160 pVCpu->iem.s.offOpcode = 0;
1161 pVCpu->iem.s.cbOpcode = 0;
1162#endif
1163 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1164}
1165#endif
1166
1167/**
1168 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1169 *
1170 * @param pVCpu The cross context virtual CPU structure of the
1171 * calling thread.
1172 */
1173DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu)
1174{
1175 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1176#ifdef VBOX_STRICT
1177# ifdef IEM_WITH_CODE_TLB
1178 NOREF(pVCpu);
1179# else
1180 pVCpu->iem.s.cbOpcode = 0;
1181# endif
1182#else
1183 NOREF(pVCpu);
1184#endif
1185}
1186
1187
1188/**
1189 * Initializes the decoder state.
1190 *
1191 * iemReInitDecoder is mostly a copy of this function.
1192 *
1193 * @param pVCpu The cross context virtual CPU structure of the
1194 * calling thread.
1195 * @param fBypassHandlers Whether to bypass access handlers.
1196 * @param fDisregardLock Whether to disregard the LOCK prefix.
1197 */
1198DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1199{
1200 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1201 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1202 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1203 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1206 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1207 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1209 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1210
1211 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1212 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1213 pVCpu->iem.s.enmCpuMode = enmMode;
1214 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1215 pVCpu->iem.s.enmEffAddrMode = enmMode;
1216 if (enmMode != IEMMODE_64BIT)
1217 {
1218 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1219 pVCpu->iem.s.enmEffOpSize = enmMode;
1220 }
1221 else
1222 {
1223 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1224 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1225 }
1226 pVCpu->iem.s.fPrefixes = 0;
1227 pVCpu->iem.s.uRexReg = 0;
1228 pVCpu->iem.s.uRexB = 0;
1229 pVCpu->iem.s.uRexIndex = 0;
1230 pVCpu->iem.s.idxPrefix = 0;
1231 pVCpu->iem.s.uVex3rdReg = 0;
1232 pVCpu->iem.s.uVexLength = 0;
1233 pVCpu->iem.s.fEvexStuff = 0;
1234 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1235#ifdef IEM_WITH_CODE_TLB
1236 pVCpu->iem.s.pbInstrBuf = NULL;
1237 pVCpu->iem.s.offInstrNextByte = 0;
1238 pVCpu->iem.s.offCurInstrStart = 0;
1239# ifdef VBOX_STRICT
1240 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1241 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1242 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1243# endif
1244#else
1245 pVCpu->iem.s.offOpcode = 0;
1246 pVCpu->iem.s.cbOpcode = 0;
1247#endif
1248 pVCpu->iem.s.offModRm = 0;
1249 pVCpu->iem.s.cActiveMappings = 0;
1250 pVCpu->iem.s.iNextMapping = 0;
1251 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1252 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1253 pVCpu->iem.s.fDisregardLock = fDisregardLock;
1254
1255#ifdef DBGFTRACE_ENABLED
1256 switch (enmMode)
1257 {
1258 case IEMMODE_64BIT:
1259 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1260 break;
1261 case IEMMODE_32BIT:
1262 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1263 break;
1264 case IEMMODE_16BIT:
1265 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1266 break;
1267 }
1268#endif
1269}
1270
1271
1272/**
1273 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1274 *
1275 * This is mostly a copy of iemInitDecoder.
1276 *
1277 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1278 */
1279DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
1280{
1281 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1282 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1283 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1284 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1285 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1286 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1287 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1288 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1289 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1290
1291 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1292 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1293 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1294 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1295 pVCpu->iem.s.enmEffAddrMode = enmMode;
1296 if (enmMode != IEMMODE_64BIT)
1297 {
1298 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1299 pVCpu->iem.s.enmEffOpSize = enmMode;
1300 }
1301 else
1302 {
1303 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1304 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1305 }
1306 pVCpu->iem.s.fPrefixes = 0;
1307 pVCpu->iem.s.uRexReg = 0;
1308 pVCpu->iem.s.uRexB = 0;
1309 pVCpu->iem.s.uRexIndex = 0;
1310 pVCpu->iem.s.idxPrefix = 0;
1311 pVCpu->iem.s.uVex3rdReg = 0;
1312 pVCpu->iem.s.uVexLength = 0;
1313 pVCpu->iem.s.fEvexStuff = 0;
1314 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1315#ifdef IEM_WITH_CODE_TLB
1316 if (pVCpu->iem.s.pbInstrBuf)
1317 {
1318 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1319 - pVCpu->iem.s.uInstrBufPc;
1320 if (off < pVCpu->iem.s.cbInstrBufTotal)
1321 {
1322 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1323 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1324 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1325 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1326 else
1327 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1328 }
1329 else
1330 {
1331 pVCpu->iem.s.pbInstrBuf = NULL;
1332 pVCpu->iem.s.offInstrNextByte = 0;
1333 pVCpu->iem.s.offCurInstrStart = 0;
1334 pVCpu->iem.s.cbInstrBuf = 0;
1335 pVCpu->iem.s.cbInstrBufTotal = 0;
1336 }
1337 }
1338 else
1339 {
1340 pVCpu->iem.s.offInstrNextByte = 0;
1341 pVCpu->iem.s.offCurInstrStart = 0;
1342 pVCpu->iem.s.cbInstrBuf = 0;
1343 pVCpu->iem.s.cbInstrBufTotal = 0;
1344 }
1345#else
1346 pVCpu->iem.s.cbOpcode = 0;
1347 pVCpu->iem.s.offOpcode = 0;
1348#endif
1349 pVCpu->iem.s.offModRm = 0;
1350 Assert(pVCpu->iem.s.cActiveMappings == 0);
1351 pVCpu->iem.s.iNextMapping = 0;
1352 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1353 Assert(pVCpu->iem.s.fBypassHandlers == false);
1354
1355#ifdef DBGFTRACE_ENABLED
1356 switch (enmMode)
1357 {
1358 case IEMMODE_64BIT:
1359 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1360 break;
1361 case IEMMODE_32BIT:
1362 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1363 break;
1364 case IEMMODE_16BIT:
1365 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1366 break;
1367 }
1368#endif
1369}
1370
1371
1372
1373/**
1374 * Prefetches opcodes the first time when starting to execute.
1375 *
1376 * @returns Strict VBox status code.
1377 * @param pVCpu The cross context virtual CPU structure of the
1378 * calling thread.
1379 * @param fBypassHandlers Whether to bypass access handlers.
1380 * @param fDisregardLock Whether to disregard LOCK prefixes.
1381 *
1382 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
1383 * store them as such.
1384 */
1385IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1386{
1387 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
1388
1389#ifdef IEM_WITH_CODE_TLB
1390 /** @todo Do ITLB lookup here. */
1391
1392#else /* !IEM_WITH_CODE_TLB */
1393
1394 /*
1395 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1396 *
1397 * First translate CS:rIP to a physical address.
1398 */
1399 uint32_t cbToTryRead;
1400 RTGCPTR GCPtrPC;
1401 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1402 {
1403 cbToTryRead = PAGE_SIZE;
1404 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1405 if (IEM_IS_CANONICAL(GCPtrPC))
1406 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1407 else
1408 return iemRaiseGeneralProtectionFault0(pVCpu);
1409 }
1410 else
1411 {
1412 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1413 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1414 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1415 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1416 else
1417 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1418 if (cbToTryRead) { /* likely */ }
1419 else /* overflowed */
1420 {
1421 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1422 cbToTryRead = UINT32_MAX;
1423 }
1424 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1425 Assert(GCPtrPC <= UINT32_MAX);
1426 }
1427
1428 PGMPTWALK Walk;
1429 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
1430 if (RT_SUCCESS(rc))
1431 Assert(Walk.fSucceeded); /* probable. */
1432 else
1433 {
1434 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1435 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1436 }
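    /* The page walk gave us the effective (accumulated) permissions across all
       paging levels; for an instruction fetch we only care about user/supervisor
       (CPL 3 needs a user page) and no-execute (faults only when EFER.NXE is set). */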
1437 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1438 else
1439 {
1440 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1441 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1442 }
1443 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1444 else
1445 {
1446 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1447 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1448 }
1449 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & PAGE_OFFSET_MASK);
1450 /** @todo Check reserved bits and such stuff. PGM is better at doing
1451 * that, so do it when implementing the guest virtual address
1452 * TLB... */
1453
1454 /*
1455 * Read the bytes at this address.
1456 */
1457 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1458 if (cbToTryRead > cbLeftOnPage)
1459 cbToTryRead = cbLeftOnPage;
1460 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1461 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1462
1463 if (!pVCpu->iem.s.fBypassHandlers)
1464 {
1465 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1466 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1467 { /* likely */ }
1468 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1469 {
1470 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1471 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1472 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1473 }
1474 else
1475 {
1476 Log((RT_SUCCESS(rcStrict)
1477 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1478 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1479 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1480 return rcStrict;
1481 }
1482 }
1483 else
1484 {
1485 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1486 if (RT_SUCCESS(rc))
1487 { /* likely */ }
1488 else
1489 {
1490 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1491 GCPtrPC, GCPhys, cbToTryRead, rc));
1492 return rc;
1493 }
1494 }
1495 pVCpu->iem.s.cbOpcode = cbToTryRead;
1496#endif /* !IEM_WITH_CODE_TLB */
1497 return VINF_SUCCESS;
1498}
1499
1500
1501/**
1502 * Invalidates the IEM TLBs.
1503 *
1504 * This is called internally as well as by PGM when moving GC mappings.
1505 *
1507 * @param pVCpu The cross context virtual CPU structure of the calling
1508 * thread.
1509 * @param fVmm Set when PGM calls us with a remapping.
1510 */
1511VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu, bool fVmm)
1512{
1513#ifdef IEM_WITH_CODE_TLB
1514 pVCpu->iem.s.cbInstrBufTotal = 0;
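    /* Invalidation is done lazily by bumping the TLB revision: entry tags are the
       guest page number OR'ed with the revision (see IEMTlbInvalidatePage and the
       code TLB lookup), so a new revision makes every existing tag miss.  Only on
       the rare wrap-around to zero do we have to scrub the tags explicitly. */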
1515 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1516 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1517 { /* very likely */ }
1518 else
1519 {
1520 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1521 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1522 while (i-- > 0)
1523 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1524 }
1525#endif
1526
1527#ifdef IEM_WITH_DATA_TLB
1528 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1529 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1530 { /* very likely */ }
1531 else
1532 {
1533 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1534 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1535 while (i-- > 0)
1536 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1537 }
1538#endif
1539 NOREF(pVCpu); NOREF(fVmm);
1540}
1541
1542
1543/**
1544 * Invalidates a page in the TLBs.
1545 *
1546 * @param pVCpu The cross context virtual CPU structure of the calling
1547 * thread.
1548 * @param GCPtr The address of the page to invalidate.
1549 */
1550VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1551{
1552#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
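    /* Both TLBs are direct mapped with 256 entries: the low 8 bits of the guest
       page number select the entry, and a hit requires the tag (page number OR'ed
       with the current revision) to match exactly. */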
1553 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1554 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1555 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1556 uintptr_t idx = (uint8_t)GCPtr;
1557
1558# ifdef IEM_WITH_CODE_TLB
1559 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1560 {
1561 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1562 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1563 pVCpu->iem.s.cbInstrBufTotal = 0;
1564 }
1565# endif
1566
1567# ifdef IEM_WITH_DATA_TLB
1568 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1569 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1570# endif
1571#else
1572 NOREF(pVCpu); NOREF(GCPtr);
1573#endif
1574}
1575
1576
1577/**
1578 * Invalidates the host physical aspects of the IEM TLBs.
1579 *
1580 * This is called internally as well as by PGM when moving GC mappings.
1581 *
1582 * @param pVCpu The cross context virtual CPU structure of the calling
1583 * thread.
1584 */
1585VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
1586{
1587#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1588 /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1589
1590# ifdef IEM_WITH_CODE_TLB
1591 pVCpu->iem.s.cbInstrBufTotal = 0;
1592# endif
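    /* Same lazy scheme as the virtual TLB revision: bumping uTlbPhysRev invalidates
       the cached physical page info (pbMappingR3 and the PG_NO_READ/PG_NO_WRITE
       bits) in every entry, and only a wrap-around forces us to touch each entry. */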
1593 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1594 if (uTlbPhysRev != 0)
1595 {
1596 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1597 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1598 }
1599 else
1600 {
1601 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1602 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1603
1604 unsigned i;
1605# ifdef IEM_WITH_CODE_TLB
1606 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1607 while (i-- > 0)
1608 {
1609 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1610 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1611 }
1612# endif
1613# ifdef IEM_WITH_DATA_TLB
1614 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1615 while (i-- > 0)
1616 {
1617 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1618 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1619 }
1620# endif
1621 }
1622#else
1623 NOREF(pVCpu);
1624#endif
1625}
1626
1627
1628/**
1629 * Invalidates the host physical aspects of the IEM TLBs for all VCPUs.
1630 *
1631 * This is called internally as well as by PGM when moving GC mappings.
1632 *
1633 * @param pVM The cross context VM structure.
1634 *
1635 * @remarks Caller holds the PGM lock.
1636 */
1637VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1638{
1639 RT_NOREF_PV(pVM);
1640}
1641
1642#ifdef IEM_WITH_CODE_TLB
1643
1644/**
1645 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1646 * longjmp'ing on failure.
1647 *
1648 * We end up here for a number of reasons:
1649 * - pbInstrBuf isn't yet initialized.
1650 * - Advancing beyond the buffer boundary (e.g. cross page).
1651 * - Advancing beyond the CS segment limit.
1652 * - Fetching from a non-mappable page (e.g. MMIO).
1653 *
1654 * @param pVCpu The cross context virtual CPU structure of the
1655 * calling thread.
1656 * @param pvDst Where to return the bytes.
1657 * @param cbDst Number of bytes to read.
1658 *
1659 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1660 */
1661IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst)
1662{
1663#ifdef IN_RING3
1664 for (;;)
1665 {
1666 Assert(cbDst <= 8);
1667 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1668
1669 /*
1670 * We might have a partial buffer match, deal with that first to make the
1671 * rest simpler. This is the first part of the cross page/buffer case.
1672 */
1673 if (pVCpu->iem.s.pbInstrBuf != NULL)
1674 {
1675 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1676 {
1677 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1678 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1679 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1680
1681 cbDst -= cbCopy;
1682 pvDst = (uint8_t *)pvDst + cbCopy;
1683 offBuf += cbCopy;
1684 pVCpu->iem.s.offInstrNextByte += cbCopy;
1685 }
1686 }
1687
1688 /*
1689 * Check segment limit, figuring how much we're allowed to access at this point.
1690 *
1691 * We will fault immediately if RIP is past the segment limit / in non-canonical
1692 * territory. If we do continue, there are one or more bytes to read before we
1693 * end up in trouble and we need to do that first before faulting.
1694 */
1695 RTGCPTR GCPtrFirst;
1696 uint32_t cbMaxRead;
1697 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1698 {
1699 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1700 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1701 { /* likely */ }
1702 else
1703 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1704 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1705 }
1706 else
1707 {
1708 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1709 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1710 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1711 { /* likely */ }
1712 else
1713 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1714 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1715 if (cbMaxRead != 0)
1716 { /* likely */ }
1717 else
1718 {
1719 /* Overflowed because address is 0 and limit is max. */
1720 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1721 cbMaxRead = X86_PAGE_SIZE;
1722 }
1723 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1724 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1725 if (cbMaxRead2 < cbMaxRead)
1726 cbMaxRead = cbMaxRead2;
1727 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1728 }
1729
1730 /*
1731 * Get the TLB entry for this piece of code.
1732 */
1733 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1734 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1735 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1736 if (pTlbe->uTag == uTag)
1737 {
1738 /* likely when executing lots of code, otherwise unlikely */
1739# ifdef VBOX_WITH_STATISTICS
1740 pVCpu->iem.s.CodeTlb.cTlbHits++;
1741# endif
1742 }
1743 else
1744 {
1745 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1746 PGMPTWALK Walk;
1747 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
1748 if (RT_FAILURE(rc))
1749 {
1750 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1751 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1752 }
1753
1754 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1755 Assert(Walk.fSucceeded);
1756 pTlbe->uTag = uTag;
1757 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D))
1758 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
1759 pTlbe->GCPhys = Walk.GCPhys;
1760 pTlbe->pbMappingR3 = NULL;
1761 }
1762
1763 /*
1764 * Check TLB page table level access flags.
1765 */
1766 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1767 {
1768 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1769 {
1770 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1771 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1772 }
1773 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1774 {
1775 Log(("iemOpcodeFetchBytesJmp: %RGv - NX\n", GCPtrFirst));
1776 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1777 }
1778 }
1779
1780 /*
1781 * Look up the physical page info if necessary.
1782 */
1783 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1784 { /* not necessary */ }
1785 else
1786 {
1787 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1788 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1789 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1790 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1791 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1792 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1793 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1794 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1795 }
1796
1797# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1798 /*
1799 * Try do a direct read using the pbMappingR3 pointer.
1800 */
1801 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1802 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1803 {
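            /* pbInstrBuf covers the whole guest page: cbInstrBufTotal is the number
               of valid bytes in it, while cbInstrBuf is capped at 15 bytes (the
               architectural maximum instruction length) past the start of the
               current instruction so the inline fetchers come back here rather than
               running past that limit. */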
1804 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1805 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1806 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1807 {
1808 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1809 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1810 }
1811 else
1812 {
1813 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1814 Assert(cbInstr < cbMaxRead);
1815 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1816 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1817 }
1818 if (cbDst <= cbMaxRead)
1819 {
1820 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1821 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1822 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1823 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1824 return;
1825 }
1826 pVCpu->iem.s.pbInstrBuf = NULL;
1827
1828 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1829 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1830 }
1831 else
1832# endif
1833#if 0
1834 /*
1835 * If there is no special read handling, we can read a bit more and
1836 * put it in the prefetch buffer.
1837 */
1838 if ( cbDst < cbMaxRead
1839 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1840 {
1841 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1842 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1843 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1844 { /* likely */ }
1845 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1846 {
1847 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1848 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1849 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1850 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1851 }
1852 else
1853 {
1854 Log((RT_SUCCESS(rcStrict)
1855 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1856 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1857 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1858 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1859 }
1860 }
1861 /*
1862 * Special read handling, so only read exactly what's needed.
1863 * This is a highly unlikely scenario.
1864 */
1865 else
1866#endif
1867 {
1868 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1869 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1870 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1871 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1872 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1873 { /* likely */ }
1874 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1875 {
1876 Log(("iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1877 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1878 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1879 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1880 }
1881 else
1882 {
1883 Log((RT_SUCCESS(rcStrict)
1884 ? "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1885 : "iemOpcodeFetchBytesJmp: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1886 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1887 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1888 }
1889 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1890 if (cbToRead == cbDst)
1891 return;
1892 }
1893
1894 /*
1895 * More to read, loop.
1896 */
1897 cbDst -= cbMaxRead;
1898 pvDst = (uint8_t *)pvDst + cbMaxRead;
1899 }
1900#else
1901 RT_NOREF(pvDst, cbDst);
1902 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1903#endif
1904}
1905
1906#else
1907
1908/**
1909 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1910 * exception if it fails.
1911 *
1912 * @returns Strict VBox status code.
1913 * @param pVCpu The cross context virtual CPU structure of the
1914 * calling thread.
1915 * @param cbMin The minimum number of bytes relative to offOpcode
1916 * that must be read.
1917 */
1918IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin)
1919{
1920 /*
1921 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1922 *
1923 * First translate CS:rIP to a physical address.
1924 */
1925 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1926 uint32_t cbToTryRead;
1927 RTGCPTR GCPtrNext;
1928 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1929 {
1930 cbToTryRead = PAGE_SIZE;
1931 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1932 if (!IEM_IS_CANONICAL(GCPtrNext))
1933 return iemRaiseGeneralProtectionFault0(pVCpu);
1934 }
1935 else
1936 {
1937 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1938 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1939 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1940 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1941 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1942 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1943 if (!cbToTryRead) /* overflowed */
1944 {
1945 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1946 cbToTryRead = UINT32_MAX;
1947 /** @todo check out wrapping around the code segment. */
1948 }
1949 if (cbToTryRead < cbMin - cbLeft)
1950 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1951 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1952 }
1953
1954 /* Only read up to the end of the page, and make sure we don't read more
1955 than the opcode buffer can hold. */
1956 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1957 if (cbToTryRead > cbLeftOnPage)
1958 cbToTryRead = cbLeftOnPage;
1959 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1960 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1961/** @todo r=bird: Convert assertion into undefined opcode exception? */
1962 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1963
1964 PGMPTWALK Walk;
1965 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1966 if (RT_FAILURE(rc))
1967 {
1968 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1969 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1970 }
1971 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1972 {
1973 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1974 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1975 }
1976 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1977 {
1978 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1979 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1980 }
1981 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & PAGE_OFFSET_MASK);
1982 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1983 /** @todo Check reserved bits and such stuff. PGM is better at doing
1984 * that, so do it when implementing the guest virtual address
1985 * TLB... */
1986
1987 /*
1988 * Read the bytes at this address.
1989 *
1990 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
1991 * and since PATM should only patch the start of an instruction there
1992 * should be no need to check again here.
1993 */
1994 if (!pVCpu->iem.s.fBypassHandlers)
1995 {
1996 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
1997 cbToTryRead, PGMACCESSORIGIN_IEM);
1998 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1999 { /* likely */ }
2000 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2001 {
2002 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2003 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2004 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2005 }
2006 else
2007 {
2008 Log((RT_SUCCESS(rcStrict)
2009 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2010 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2011 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2012 return rcStrict;
2013 }
2014 }
2015 else
2016 {
2017 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2018 if (RT_SUCCESS(rc))
2019 { /* likely */ }
2020 else
2021 {
2022 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2023 return rc;
2024 }
2025 }
2026 pVCpu->iem.s.cbOpcode += cbToTryRead;
2027 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2028
2029 return VINF_SUCCESS;
2030}
2031
2032#endif /* !IEM_WITH_CODE_TLB */
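/*
 * Note: opcode bytes reach the decoder through one of two backends.  With
 * IEM_WITH_CODE_TLB, iemOpcodeFetchBytesJmp() above copies straight out of the
 * TLB-mapped instruction buffer (pbInstrBuf); without it, iemOpcodeFetchMoreBytes()
 * appends to the abOpcode array.  Error reporting likewise comes in two flavours:
 * with IEM_WITH_SETJMP the helpers longjmp via pJmpBuf, otherwise they return a
 * strict status code which the IEM_OPCODE_GET_NEXT_* macros below propagate.
 */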
2033#ifndef IEM_WITH_SETJMP
2034
2035/**
2036 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2037 *
2038 * @returns Strict VBox status code.
2039 * @param pVCpu The cross context virtual CPU structure of the
2040 * calling thread.
2041 * @param pb Where to return the opcode byte.
2042 */
2043DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb)
2044{
2045 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2046 if (rcStrict == VINF_SUCCESS)
2047 {
2048 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2049 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2050 pVCpu->iem.s.offOpcode = offOpcode + 1;
2051 }
2052 else
2053 *pb = 0;
2054 return rcStrict;
2055}
2056
2057
2058/**
2059 * Fetches the next opcode byte.
2060 *
2061 * @returns Strict VBox status code.
2062 * @param pVCpu The cross context virtual CPU structure of the
2063 * calling thread.
2064 * @param pu8 Where to return the opcode byte.
2065 */
2066DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
2067{
2068 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2069 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2070 {
2071 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2072 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2073 return VINF_SUCCESS;
2074 }
2075 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2076}
2077
2078#else /* IEM_WITH_SETJMP */
2079
2080/**
2081 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2082 *
2083 * @returns The opcode byte.
2084 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2085 */
2086DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu)
2087{
2088# ifdef IEM_WITH_CODE_TLB
2089 uint8_t u8;
2090 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2091 return u8;
2092# else
2093 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2094 if (rcStrict == VINF_SUCCESS)
2095 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2096 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2097# endif
2098}
2099
2100
2101/**
2102 * Fetches the next opcode byte, longjmp on error.
2103 *
2104 * @returns The opcode byte.
2105 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2106 */
2107DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu)
2108{
2109# ifdef IEM_WITH_CODE_TLB
2110 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2111 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2112 if (RT_LIKELY( pbBuf != NULL
2113 && offBuf < pVCpu->iem.s.cbInstrBuf))
2114 {
2115 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2116 return pbBuf[offBuf];
2117 }
2118# else
2119 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2120 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2121 {
2122 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2123 return pVCpu->iem.s.abOpcode[offOpcode];
2124 }
2125# endif
2126 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2127}
2128
2129#endif /* IEM_WITH_SETJMP */
2130
2131/**
2132 * Fetches the next opcode byte, returns automatically on failure.
2133 *
2134 * @param a_pu8 Where to return the opcode byte.
2135 * @remark Implicitly references pVCpu.
2136 */
2137#ifndef IEM_WITH_SETJMP
2138# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2139 do \
2140 { \
2141 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2142 if (rcStrict2 == VINF_SUCCESS) \
2143 { /* likely */ } \
2144 else \
2145 return rcStrict2; \
2146 } while (0)
2147#else
2148# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2149#endif /* IEM_WITH_SETJMP */
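/* Usage sketch (hypothetical, not an actual decoder function): within a decoder
 * routine that has pVCpu in scope and returns a strict status code, the fetch
 * macros hide the two error models:
 *
 *     uint8_t bRm, u8Imm;
 *     IEM_OPCODE_GET_NEXT_RM(&bRm);    // ModR/M byte, position noted for VT-x exits
 *     IEM_OPCODE_GET_NEXT_U8(&u8Imm);  // 8-bit immediate
 *
 * Without IEM_WITH_SETJMP each macro returns the failure status to the caller;
 * with it the macro is a plain assignment and errors longjmp out instead.
 */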
2150
2151
2152#ifndef IEM_WITH_SETJMP
2153/**
2154 * Fetches the next signed byte from the opcode stream.
2155 *
2156 * @returns Strict VBox status code.
2157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2158 * @param pi8 Where to return the signed byte.
2159 */
2160DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8)
2161{
2162 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2163}
2164#endif /* !IEM_WITH_SETJMP */
2165
2166
2167/**
2168 * Fetches the next signed byte from the opcode stream, returning automatically
2169 * on failure.
2170 *
2171 * @param a_pi8 Where to return the signed byte.
2172 * @remark Implicitly references pVCpu.
2173 */
2174#ifndef IEM_WITH_SETJMP
2175# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2176 do \
2177 { \
2178 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2179 if (rcStrict2 != VINF_SUCCESS) \
2180 return rcStrict2; \
2181 } while (0)
2182#else /* IEM_WITH_SETJMP */
2183# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2184
2185#endif /* IEM_WITH_SETJMP */
2186
2187#ifndef IEM_WITH_SETJMP
2188
2189/**
2190 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2191 *
2192 * @returns Strict VBox status code.
2193 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2194 * @param pu16 Where to return the opcode word.
2195 */
2196DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2197{
2198 uint8_t u8;
2199 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2200 if (rcStrict == VINF_SUCCESS)
2201 *pu16 = (int8_t)u8;
2202 return rcStrict;
2203}
2204
2205
2206/**
2207 * Fetches the next signed byte from the opcode stream, extending it to
2208 * unsigned 16-bit.
2209 *
2210 * @returns Strict VBox status code.
2211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2212 * @param pu16 Where to return the unsigned word.
2213 */
2214DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16)
2215{
2216 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2217 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2218 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2219
2220 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2221 pVCpu->iem.s.offOpcode = offOpcode + 1;
2222 return VINF_SUCCESS;
2223}
2224
2225#endif /* !IEM_WITH_SETJMP */
2226
2227/**
2228 * Fetches the next signed byte from the opcode stream, sign-extending it to
2229 * a word and returning automatically on failure.
2230 *
2231 * @param a_pu16 Where to return the word.
2232 * @remark Implicitly references pVCpu.
2233 */
2234#ifndef IEM_WITH_SETJMP
2235# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2236 do \
2237 { \
2238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2239 if (rcStrict2 != VINF_SUCCESS) \
2240 return rcStrict2; \
2241 } while (0)
2242#else
2243# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2244#endif
2245
2246#ifndef IEM_WITH_SETJMP
2247
2248/**
2249 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2250 *
2251 * @returns Strict VBox status code.
2252 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2253 * @param pu32 Where to return the opcode dword.
2254 */
2255DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2256{
2257 uint8_t u8;
2258 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2259 if (rcStrict == VINF_SUCCESS)
2260 *pu32 = (int8_t)u8;
2261 return rcStrict;
2262}
2263
2264
2265/**
2266 * Fetches the next signed byte from the opcode stream, extending it to
2267 * unsigned 32-bit.
2268 *
2269 * @returns Strict VBox status code.
2270 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2271 * @param pu32 Where to return the unsigned dword.
2272 */
2273DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2274{
2275 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2276 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2277 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2278
2279 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2280 pVCpu->iem.s.offOpcode = offOpcode + 1;
2281 return VINF_SUCCESS;
2282}
2283
2284#endif /* !IEM_WITH_SETJMP */
2285
2286/**
2287 * Fetches the next signed byte from the opcode stream, sign-extending it to
2288 * a double word and returning automatically on failure.
2289 *
2290 * @param a_pu32 Where to return the double word.
2291 * @remark Implicitly references pVCpu.
2292 */
2293#ifndef IEM_WITH_SETJMP
2294#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2295 do \
2296 { \
2297 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2298 if (rcStrict2 != VINF_SUCCESS) \
2299 return rcStrict2; \
2300 } while (0)
2301#else
2302# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2303#endif
2304
2305#ifndef IEM_WITH_SETJMP
2306
2307/**
2308 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2309 *
2310 * @returns Strict VBox status code.
2311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2312 * @param pu64 Where to return the opcode qword.
2313 */
2314DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2315{
2316 uint8_t u8;
2317 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2318 if (rcStrict == VINF_SUCCESS)
2319 *pu64 = (int8_t)u8;
2320 return rcStrict;
2321}
2322
2323
2324/**
2325 * Fetches the next signed byte from the opcode stream, extending it to
2326 * unsigned 64-bit.
2327 *
2328 * @returns Strict VBox status code.
2329 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2330 * @param pu64 Where to return the unsigned qword.
2331 */
2332DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2333{
2334 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2335 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2336 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2337
2338 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2339 pVCpu->iem.s.offOpcode = offOpcode + 1;
2340 return VINF_SUCCESS;
2341}
2342
2343#endif /* !IEM_WITH_SETJMP */
2344
2345
2346/**
2347 * Fetches the next signed byte from the opcode stream, sign-extending it to
2348 * a quad word and returning automatically on failure.
2349 *
2350 * @param a_pu64 Where to return the quad word.
2351 * @remark Implicitly references pVCpu.
2352 */
2353#ifndef IEM_WITH_SETJMP
2354# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2355 do \
2356 { \
2357 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2358 if (rcStrict2 != VINF_SUCCESS) \
2359 return rcStrict2; \
2360 } while (0)
2361#else
2362# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2363#endif
2364
2365
2366#ifndef IEM_WITH_SETJMP
2367/**
2368 * Fetches the next opcode byte.
2369 *
2370 * @returns Strict VBox status code.
2371 * @param pVCpu The cross context virtual CPU structure of the
2372 * calling thread.
2373 * @param pu8 Where to return the opcode byte.
2374 */
2375DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8)
2376{
2377 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2378 pVCpu->iem.s.offModRm = offOpcode;
2379 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2380 {
2381 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2382 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2383 return VINF_SUCCESS;
2384 }
2385 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2386}
2387#else /* IEM_WITH_SETJMP */
2388/**
2389 * Fetches the next opcode byte, noting down its position as the ModR/M byte; longjmp on error.
2390 *
2391 * @returns The opcode byte.
2392 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2393 */
2394DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu)
2395{
2396# ifdef IEM_WITH_CODE_TLB
2397 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2398 pVCpu->iem.s.offModRm = offBuf;
2399 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2400 if (RT_LIKELY( pbBuf != NULL
2401 && offBuf < pVCpu->iem.s.cbInstrBuf))
2402 {
2403 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2404 return pbBuf[offBuf];
2405 }
2406# else
2407 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2408 pVCpu->iem.s.offModRm = offOpcode;
2409 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2410 {
2411 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2412 return pVCpu->iem.s.abOpcode[offOpcode];
2413 }
2414# endif
2415 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2416}
2417#endif /* IEM_WITH_SETJMP */
2418
2419/**
2420 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2421 * on failure.
2422 *
2423 * Will note down the position of the ModR/M byte for VT-x exits.
2424 *
2425 * @param a_pbRm Where to return the RM opcode byte.
2426 * @remark Implicitly references pVCpu.
2427 */
2428#ifndef IEM_WITH_SETJMP
2429# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2430 do \
2431 { \
2432 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2433 if (rcStrict2 == VINF_SUCCESS) \
2434 { /* likely */ } \
2435 else \
2436 return rcStrict2; \
2437 } while (0)
2438#else
2439# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2440#endif /* IEM_WITH_SETJMP */
2441
2442
2443#ifndef IEM_WITH_SETJMP
2444
2445/**
2446 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2447 *
2448 * @returns Strict VBox status code.
2449 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2450 * @param pu16 Where to return the opcode word.
2451 */
2452DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2453{
2454 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2455 if (rcStrict == VINF_SUCCESS)
2456 {
2457 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2458# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2459 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2460# else
2461 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2462# endif
2463 pVCpu->iem.s.offOpcode = offOpcode + 2;
2464 }
2465 else
2466 *pu16 = 0;
2467 return rcStrict;
2468}
2469
2470
2471/**
2472 * Fetches the next opcode word.
2473 *
2474 * @returns Strict VBox status code.
2475 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2476 * @param pu16 Where to return the opcode word.
2477 */
2478DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16)
2479{
2480 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2481 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2482 {
2483 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2484# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2485 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2486# else
2487 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2488# endif
2489 return VINF_SUCCESS;
2490 }
2491 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2492}
2493
2494#else /* IEM_WITH_SETJMP */
2495
2496/**
2497 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2498 *
2499 * @returns The opcode word.
2500 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2501 */
2502DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu)
2503{
2504# ifdef IEM_WITH_CODE_TLB
2505 uint16_t u16;
2506 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2507 return u16;
2508# else
2509 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2510 if (rcStrict == VINF_SUCCESS)
2511 {
2512 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2513 pVCpu->iem.s.offOpcode += 2;
2514# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2515 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2516# else
2517 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2518# endif
2519 }
2520 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2521# endif
2522}
2523
2524
2525/**
2526 * Fetches the next opcode word, longjmp on error.
2527 *
2528 * @returns The opcode word.
2529 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2530 */
2531DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu)
2532{
2533# ifdef IEM_WITH_CODE_TLB
2534 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2535 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2536 if (RT_LIKELY( pbBuf != NULL
2537 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2538 {
2539 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2540# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2541 return *(uint16_t const *)&pbBuf[offBuf];
2542# else
2543 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2544# endif
2545 }
2546# else
2547 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2548 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2549 {
2550 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2551# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2552 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2553# else
2554 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2555# endif
2556 }
2557# endif
2558 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2559}
2560
2561#endif /* IEM_WITH_SETJMP */
2562
2563
2564/**
2565 * Fetches the next opcode word, returns automatically on failure.
2566 *
2567 * @param a_pu16 Where to return the opcode word.
2568 * @remark Implicitly references pVCpu.
2569 */
2570#ifndef IEM_WITH_SETJMP
2571# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2572 do \
2573 { \
2574 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2575 if (rcStrict2 != VINF_SUCCESS) \
2576 return rcStrict2; \
2577 } while (0)
2578#else
2579# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2580#endif
2581
2582#ifndef IEM_WITH_SETJMP
2583
2584/**
2585 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2586 *
2587 * @returns Strict VBox status code.
2588 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2589 * @param pu32 Where to return the opcode double word.
2590 */
2591DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2592{
2593 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2594 if (rcStrict == VINF_SUCCESS)
2595 {
2596 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2597 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2598 pVCpu->iem.s.offOpcode = offOpcode + 2;
2599 }
2600 else
2601 *pu32 = 0;
2602 return rcStrict;
2603}
2604
2605
2606/**
2607 * Fetches the next opcode word, zero extending it to a double word.
2608 *
2609 * @returns Strict VBox status code.
2610 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2611 * @param pu32 Where to return the opcode double word.
2612 */
2613DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2614{
2615 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2616 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2617 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2618
2619 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2620 pVCpu->iem.s.offOpcode = offOpcode + 2;
2621 return VINF_SUCCESS;
2622}
2623
2624#endif /* !IEM_WITH_SETJMP */
2625
2626
2627/**
2628 * Fetches the next opcode word and zero extends it to a double word, returns
2629 * automatically on failure.
2630 *
2631 * @param a_pu32 Where to return the opcode double word.
2632 * @remark Implicitly references pVCpu.
2633 */
2634#ifndef IEM_WITH_SETJMP
2635# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2636 do \
2637 { \
2638 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2639 if (rcStrict2 != VINF_SUCCESS) \
2640 return rcStrict2; \
2641 } while (0)
2642#else
2643# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2644#endif
2645
2646#ifndef IEM_WITH_SETJMP
2647
2648/**
2649 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2650 *
2651 * @returns Strict VBox status code.
2652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2653 * @param pu64 Where to return the opcode quad word.
2654 */
2655DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2656{
2657 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2658 if (rcStrict == VINF_SUCCESS)
2659 {
2660 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2661 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2662 pVCpu->iem.s.offOpcode = offOpcode + 2;
2663 }
2664 else
2665 *pu64 = 0;
2666 return rcStrict;
2667}
2668
2669
2670/**
2671 * Fetches the next opcode word, zero extending it to a quad word.
2672 *
2673 * @returns Strict VBox status code.
2674 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2675 * @param pu64 Where to return the opcode quad word.
2676 */
2677DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2678{
2679 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2680 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2681 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2682
2683 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2684 pVCpu->iem.s.offOpcode = offOpcode + 2;
2685 return VINF_SUCCESS;
2686}
2687
2688#endif /* !IEM_WITH_SETJMP */
2689
2690/**
2691 * Fetches the next opcode word and zero extends it to a quad word, returns
2692 * automatically on failure.
2693 *
2694 * @param a_pu64 Where to return the opcode quad word.
2695 * @remark Implicitly references pVCpu.
2696 */
2697#ifndef IEM_WITH_SETJMP
2698# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2699 do \
2700 { \
2701 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2702 if (rcStrict2 != VINF_SUCCESS) \
2703 return rcStrict2; \
2704 } while (0)
2705#else
2706# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2707#endif
2708
2709
2710#ifndef IEM_WITH_SETJMP
2711/**
2712 * Fetches the next signed word from the opcode stream.
2713 *
2714 * @returns Strict VBox status code.
2715 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2716 * @param pi16 Where to return the signed word.
2717 */
2718DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16)
2719{
2720 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2721}
2722#endif /* !IEM_WITH_SETJMP */
2723
2724
2725/**
2726 * Fetches the next signed word from the opcode stream, returning automatically
2727 * on failure.
2728 *
2729 * @param a_pi16 Where to return the signed word.
2730 * @remark Implicitly references pVCpu.
2731 */
2732#ifndef IEM_WITH_SETJMP
2733# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2734 do \
2735 { \
2736 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2737 if (rcStrict2 != VINF_SUCCESS) \
2738 return rcStrict2; \
2739 } while (0)
2740#else
2741# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2742#endif
2743
2744#ifndef IEM_WITH_SETJMP
2745
2746/**
2747 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2748 *
2749 * @returns Strict VBox status code.
2750 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2751 * @param pu32 Where to return the opcode dword.
2752 */
2753DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2754{
2755 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2756 if (rcStrict == VINF_SUCCESS)
2757 {
2758 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2759# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2760 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2761# else
2762 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2763 pVCpu->iem.s.abOpcode[offOpcode + 1],
2764 pVCpu->iem.s.abOpcode[offOpcode + 2],
2765 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2766# endif
2767 pVCpu->iem.s.offOpcode = offOpcode + 4;
2768 }
2769 else
2770 *pu32 = 0;
2771 return rcStrict;
2772}
2773
2774
2775/**
2776 * Fetches the next opcode dword.
2777 *
2778 * @returns Strict VBox status code.
2779 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2780 * @param pu32 Where to return the opcode double word.
2781 */
2782DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32)
2783{
2784 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2785 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2786 {
2787 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2788# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2789 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2790# else
2791 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2792 pVCpu->iem.s.abOpcode[offOpcode + 1],
2793 pVCpu->iem.s.abOpcode[offOpcode + 2],
2794 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2795# endif
2796 return VINF_SUCCESS;
2797 }
2798 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2799}
2800
2801#else /* IEM_WITH_SETJMP */
2802
2803/**
2804 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2805 *
2806 * @returns The opcode dword.
2807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2808 */
2809DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu)
2810{
2811# ifdef IEM_WITH_CODE_TLB
2812 uint32_t u32;
2813 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2814 return u32;
2815# else
2816 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2817 if (rcStrict == VINF_SUCCESS)
2818 {
2819 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2820 pVCpu->iem.s.offOpcode = offOpcode + 4;
2821# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2822 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2823# else
2824 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2825 pVCpu->iem.s.abOpcode[offOpcode + 1],
2826 pVCpu->iem.s.abOpcode[offOpcode + 2],
2827 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2828# endif
2829 }
2830 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2831# endif
2832}
2833
2834
2835/**
2836 * Fetches the next opcode dword, longjmp on error.
2837 *
2838 * @returns The opcode dword.
2839 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2840 */
2841DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu)
2842{
2843# ifdef IEM_WITH_CODE_TLB
2844 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2845 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2846 if (RT_LIKELY( pbBuf != NULL
2847 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2848 {
2849 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2850# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2851 return *(uint32_t const *)&pbBuf[offBuf];
2852# else
2853 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2854 pbBuf[offBuf + 1],
2855 pbBuf[offBuf + 2],
2856 pbBuf[offBuf + 3]);
2857# endif
2858 }
2859# else
2860 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2861 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2862 {
2863 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2864# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2865 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2866# else
2867 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2868 pVCpu->iem.s.abOpcode[offOpcode + 1],
2869 pVCpu->iem.s.abOpcode[offOpcode + 2],
2870 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2871# endif
2872 }
2873# endif
2874 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2875}
2876
2877#endif /* IEM_WITH_SETJMP */
2878
2879
2880/**
2881 * Fetches the next opcode dword, returns automatically on failure.
2882 *
2883 * @param a_pu32 Where to return the opcode dword.
2884 * @remark Implicitly references pVCpu.
2885 */
2886#ifndef IEM_WITH_SETJMP
2887# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2888 do \
2889 { \
2890 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2891 if (rcStrict2 != VINF_SUCCESS) \
2892 return rcStrict2; \
2893 } while (0)
2894#else
2895# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2896#endif
2897
2898#ifndef IEM_WITH_SETJMP
2899
2900/**
2901 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2902 *
2903 * @returns Strict VBox status code.
2904 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2905 * @param pu64 Where to return the opcode qword.
2906 */
2907DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2908{
2909 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2910 if (rcStrict == VINF_SUCCESS)
2911 {
2912 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2913 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2914 pVCpu->iem.s.abOpcode[offOpcode + 1],
2915 pVCpu->iem.s.abOpcode[offOpcode + 2],
2916 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2917 pVCpu->iem.s.offOpcode = offOpcode + 4;
2918 }
2919 else
2920 *pu64 = 0;
2921 return rcStrict;
2922}
2923
2924
2925/**
2926 * Fetches the next opcode dword, zero extending it to a quad word.
2927 *
2928 * @returns Strict VBox status code.
2929 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2930 * @param pu64 Where to return the opcode quad word.
2931 */
2932DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2933{
2934 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2935 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2936 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2937
2938 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2939 pVCpu->iem.s.abOpcode[offOpcode + 1],
2940 pVCpu->iem.s.abOpcode[offOpcode + 2],
2941 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2942 pVCpu->iem.s.offOpcode = offOpcode + 4;
2943 return VINF_SUCCESS;
2944}
2945
2946#endif /* !IEM_WITH_SETJMP */
2947
2948
2949/**
2950 * Fetches the next opcode dword and zero extends it to a quad word, returns
2951 * automatically on failure.
2952 *
2953 * @param a_pu64 Where to return the opcode quad word.
2954 * @remark Implicitly references pVCpu.
2955 */
2956#ifndef IEM_WITH_SETJMP
2957# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2958 do \
2959 { \
2960 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2961 if (rcStrict2 != VINF_SUCCESS) \
2962 return rcStrict2; \
2963 } while (0)
2964#else
2965# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2966#endif
2967
2968
2969#ifndef IEM_WITH_SETJMP
2970/**
2971 * Fetches the next signed double word from the opcode stream.
2972 *
2973 * @returns Strict VBox status code.
2974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2975 * @param pi32 Where to return the signed double word.
2976 */
2977DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32)
2978{
2979 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2980}
2981#endif
2982
2983/**
2984 * Fetches the next signed double word from the opcode stream, returning
2985 * automatically on failure.
2986 *
2987 * @param a_pi32 Where to return the signed double word.
2988 * @remark Implicitly references pVCpu.
2989 */
2990#ifndef IEM_WITH_SETJMP
2991# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
2992 do \
2993 { \
2994 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
2995 if (rcStrict2 != VINF_SUCCESS) \
2996 return rcStrict2; \
2997 } while (0)
2998#else
2999# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3000#endif
3001
3002#ifndef IEM_WITH_SETJMP
3003
3004/**
3005 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3006 *
3007 * @returns Strict VBox status code.
3008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3009 * @param pu64 Where to return the opcode qword.
3010 */
3011DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3012{
3013 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3014 if (rcStrict == VINF_SUCCESS)
3015 {
3016 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3017 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3018 pVCpu->iem.s.abOpcode[offOpcode + 1],
3019 pVCpu->iem.s.abOpcode[offOpcode + 2],
3020 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3021 pVCpu->iem.s.offOpcode = offOpcode + 4;
3022 }
3023 else
3024 *pu64 = 0;
3025 return rcStrict;
3026}
3027
3028
3029/**
3030 * Fetches the next opcode dword, sign extending it into a quad word.
3031 *
3032 * @returns Strict VBox status code.
3033 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3034 * @param pu64 Where to return the opcode quad word.
3035 */
3036DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3037{
3038 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3039 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3040 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3041
3042 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3043 pVCpu->iem.s.abOpcode[offOpcode + 1],
3044 pVCpu->iem.s.abOpcode[offOpcode + 2],
3045 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3046 *pu64 = i32;
3047 pVCpu->iem.s.offOpcode = offOpcode + 4;
3048 return VINF_SUCCESS;
3049}
3050
3051#endif /* !IEM_WITH_SETJMP */
3052
3053
3054/**
3055 * Fetches the next opcode double word and sign extends it to a quad word,
3056 * returns automatically on failure.
3057 *
3058 * @param a_pu64 Where to return the opcode quad word.
3059 * @remark Implicitly references pVCpu.
3060 */
3061#ifndef IEM_WITH_SETJMP
3062# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3063 do \
3064 { \
3065 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3066 if (rcStrict2 != VINF_SUCCESS) \
3067 return rcStrict2; \
3068 } while (0)
3069#else
3070# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3071#endif
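
/*
 * Illustrative sketch, not part of the build: the S32->U64 fetchers above rely
 * on standard C integer conversions to get the x86-64 sign extension of a
 * 32-bit immediate.  Assuming nothing beyond <stdint.h> semantics:
 *
 *     int32_t  const i32 = (int32_t)UINT32_C(0xfffffff0);  // imm32 from the opcode bytes
 *     uint64_t const u64 = (uint64_t)(int64_t)i32;         // 0xfffffffffffffff0
 *
 * which is the same conversion the '*pu64 = i32;' assignment in
 * iemOpcodeGetNextS32SxU64 performs implicitly.
 */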
3072
3073#ifndef IEM_WITH_SETJMP
3074
3075/**
3076 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3077 *
3078 * @returns Strict VBox status code.
3079 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3080 * @param pu64 Where to return the opcode qword.
3081 */
3082DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3083{
3084 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3085 if (rcStrict == VINF_SUCCESS)
3086 {
3087 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3088# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3089 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3090# else
3091 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3092 pVCpu->iem.s.abOpcode[offOpcode + 1],
3093 pVCpu->iem.s.abOpcode[offOpcode + 2],
3094 pVCpu->iem.s.abOpcode[offOpcode + 3],
3095 pVCpu->iem.s.abOpcode[offOpcode + 4],
3096 pVCpu->iem.s.abOpcode[offOpcode + 5],
3097 pVCpu->iem.s.abOpcode[offOpcode + 6],
3098 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3099# endif
3100 pVCpu->iem.s.offOpcode = offOpcode + 8;
3101 }
3102 else
3103 *pu64 = 0;
3104 return rcStrict;
3105}
3106
3107
3108/**
3109 * Fetches the next opcode qword.
3110 *
3111 * @returns Strict VBox status code.
3112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3113 * @param pu64 Where to return the opcode qword.
3114 */
3115DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64)
3116{
3117 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3118 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3119 {
3120# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3121 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3122# else
3123 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3124 pVCpu->iem.s.abOpcode[offOpcode + 1],
3125 pVCpu->iem.s.abOpcode[offOpcode + 2],
3126 pVCpu->iem.s.abOpcode[offOpcode + 3],
3127 pVCpu->iem.s.abOpcode[offOpcode + 4],
3128 pVCpu->iem.s.abOpcode[offOpcode + 5],
3129 pVCpu->iem.s.abOpcode[offOpcode + 6],
3130 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3131# endif
3132 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3133 return VINF_SUCCESS;
3134 }
3135 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3136}
3137
3138#else /* IEM_WITH_SETJMP */
3139
3140/**
3141 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3142 *
3143 * @returns The opcode qword.
3144 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3145 */
3146DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu)
3147{
3148# ifdef IEM_WITH_CODE_TLB
3149 uint64_t u64;
3150 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3151 return u64;
3152# else
3153 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3154 if (rcStrict == VINF_SUCCESS)
3155 {
3156 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3157 pVCpu->iem.s.offOpcode = offOpcode + 8;
3158# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3159 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3160# else
3161 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3162 pVCpu->iem.s.abOpcode[offOpcode + 1],
3163 pVCpu->iem.s.abOpcode[offOpcode + 2],
3164 pVCpu->iem.s.abOpcode[offOpcode + 3],
3165 pVCpu->iem.s.abOpcode[offOpcode + 4],
3166 pVCpu->iem.s.abOpcode[offOpcode + 5],
3167 pVCpu->iem.s.abOpcode[offOpcode + 6],
3168 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3169# endif
3170 }
3171 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3172# endif
3173}
3174
3175
3176/**
3177 * Fetches the next opcode qword, longjmp on error.
3178 *
3179 * @returns The opcode qword.
3180 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3181 */
3182DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu)
3183{
3184# ifdef IEM_WITH_CODE_TLB
3185 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3186 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3187 if (RT_LIKELY( pbBuf != NULL
3188 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3189 {
3190 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3191# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3192 return *(uint64_t const *)&pbBuf[offBuf];
3193# else
3194 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3195 pbBuf[offBuf + 1],
3196 pbBuf[offBuf + 2],
3197 pbBuf[offBuf + 3],
3198 pbBuf[offBuf + 4],
3199 pbBuf[offBuf + 5],
3200 pbBuf[offBuf + 6],
3201 pbBuf[offBuf + 7]);
3202# endif
3203 }
3204# else
3205 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3206 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3207 {
3208 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3209# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3210 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3211# else
3212 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3213 pVCpu->iem.s.abOpcode[offOpcode + 1],
3214 pVCpu->iem.s.abOpcode[offOpcode + 2],
3215 pVCpu->iem.s.abOpcode[offOpcode + 3],
3216 pVCpu->iem.s.abOpcode[offOpcode + 4],
3217 pVCpu->iem.s.abOpcode[offOpcode + 5],
3218 pVCpu->iem.s.abOpcode[offOpcode + 6],
3219 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3220# endif
3221 }
3222# endif
3223 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3224}
3225
3226#endif /* IEM_WITH_SETJMP */
3227
3228/**
3229 * Fetches the next opcode quad word, returns automatically on failure.
3230 *
3231 * @param a_pu64 Where to return the opcode quad word.
3232 * @remark Implicitly references pVCpu.
3233 */
3234#ifndef IEM_WITH_SETJMP
3235# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3236 do \
3237 { \
3238 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3239 if (rcStrict2 != VINF_SUCCESS) \
3240 return rcStrict2; \
3241 } while (0)
3242#else
3243# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3244#endif
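
/*
 * Illustrative sketch, hypothetical decoder body (not part of the build): the
 * IEM_OPCODE_GET_NEXT_XXX macros are written to be used inside decoder
 * functions returning VBOXSTRICTRC, so the non-setjmp variants can simply
 * 'return' on a fetch failure:
 *
 *     // e.g. the imm64 of 'mov rax, imm64' (REX.W + B8)
 *     uint64_t u64Imm;
 *     IEM_OPCODE_GET_NEXT_U64(&u64Imm);   // returns (or longjmps) on failure
 *     // ... use u64Imm ...
 *
 * When IEM_WITH_SETJMP is defined the same macro expands to a plain call that
 * longjmps on failure instead of propagating a status code.
 */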
3245
3246
3247/** @name Misc Worker Functions.
3248 * @{
3249 */
3250
3251/**
3252 * Gets the exception class for the specified exception vector.
3253 *
3254 * @returns The class of the specified exception.
3255 * @param uVector The exception vector.
3256 */
3257IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3258{
3259 Assert(uVector <= X86_XCPT_LAST);
3260 switch (uVector)
3261 {
3262 case X86_XCPT_DE:
3263 case X86_XCPT_TS:
3264 case X86_XCPT_NP:
3265 case X86_XCPT_SS:
3266 case X86_XCPT_GP:
3267 case X86_XCPT_SX: /* AMD only */
3268 return IEMXCPTCLASS_CONTRIBUTORY;
3269
3270 case X86_XCPT_PF:
3271 case X86_XCPT_VE: /* Intel only */
3272 return IEMXCPTCLASS_PAGE_FAULT;
3273
3274 case X86_XCPT_DF:
3275 return IEMXCPTCLASS_DOUBLE_FAULT;
3276 }
3277 return IEMXCPTCLASS_BENIGN;
3278}
3279
3280
3281/**
3282 * Evaluates how to handle an exception caused during delivery of another event
3283 * (exception / interrupt).
3284 *
3285 * @returns How to handle the recursive exception.
3286 * @param pVCpu The cross context virtual CPU structure of the
3287 * calling thread.
3288 * @param fPrevFlags The flags of the previous event.
3289 * @param uPrevVector The vector of the previous event.
3290 * @param fCurFlags The flags of the current exception.
3291 * @param uCurVector The vector of the current exception.
3292 * @param pfXcptRaiseInfo Where to store additional information about the
3293 * exception condition. Optional.
3294 */
3295VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3296 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3297{
3298 /*
3299 * Only CPU exceptions can be raised while delivering other events, software interrupt
3300 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3301 */
3302 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3303 Assert(pVCpu); RT_NOREF(pVCpu);
3304 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3305
3306 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3307 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3308 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3309 {
3310 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3311 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3312 {
3313 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3314 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3315 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3316 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3317 {
3318 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3319 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3320 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3321 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3322 uCurVector, pVCpu->cpum.GstCtx.cr2));
3323 }
3324 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3325 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3326 {
3327 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3328 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3329 }
3330 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3331 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3332 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3333 {
3334 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3335 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3336 }
3337 }
3338 else
3339 {
3340 if (uPrevVector == X86_XCPT_NMI)
3341 {
3342 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3343 if (uCurVector == X86_XCPT_PF)
3344 {
3345 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3346 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3347 }
3348 }
3349 else if ( uPrevVector == X86_XCPT_AC
3350 && uCurVector == X86_XCPT_AC)
3351 {
3352 enmRaise = IEMXCPTRAISE_CPU_HANG;
3353 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3354 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3355 }
3356 }
3357 }
3358 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3359 {
3360 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3361 if (uCurVector == X86_XCPT_PF)
3362 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3363 }
3364 else
3365 {
3366 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3367 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3368 }
3369
3370 if (pfXcptRaiseInfo)
3371 *pfXcptRaiseInfo = fRaiseInfo;
3372 return enmRaise;
3373}
3374
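/*
 * Illustrative sketch, hypothetical caller (not part of the build): the typical
 * question answered above is whether a fault raised while delivering another
 * fault escalates to #DF.  E.g. a #PF whose delivery itself hits a #GP:
 *
 *     IEMXCPTRAISEINFO fRaiseInfo;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_GP,
 *                                                      &fRaiseInfo);
 *     // enmRaise == IEMXCPTRAISE_DOUBLE_FAULT and fRaiseInfo has
 *     // IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT set (page fault + contributory).
 */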
3375
3376/**
3377 * Enters the CPU shutdown state initiated by a triple fault or other
3378 * unrecoverable conditions.
3379 *
3380 * @returns Strict VBox status code.
3381 * @param pVCpu The cross context virtual CPU structure of the
3382 * calling thread.
3383 */
3384IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu)
3385{
3386 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3387 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3388
3389 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3390 {
3391 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3392 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3393 }
3394
3395 RT_NOREF(pVCpu);
3396 return VINF_EM_TRIPLE_FAULT;
3397}
3398
3399
3400/**
3401 * Validates a new SS segment.
3402 *
3403 * @returns VBox strict status code.
3404 * @param pVCpu The cross context virtual CPU structure of the
3405 * calling thread.
3406 * @param NewSS The new SS selector.
3407 * @param uCpl The CPL to load the stack for.
3408 * @param pDesc Where to return the descriptor.
3409 */
3410IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3411{
3412 /* Null selectors are not allowed (we're not called for dispatching
3413 interrupts with SS=0 in long mode). */
3414 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3415 {
3416 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3417 return iemRaiseTaskSwitchFault0(pVCpu);
3418 }
3419
3420 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3421 if ((NewSS & X86_SEL_RPL) != uCpl)
3422 {
3423 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3424 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3425 }
3426
3427 /*
3428 * Read the descriptor.
3429 */
3430 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3431 if (rcStrict != VINF_SUCCESS)
3432 return rcStrict;
3433
3434 /*
3435 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3436 */
3437 if (!pDesc->Legacy.Gen.u1DescType)
3438 {
3439 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3440 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3441 }
3442
3443 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3444 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3445 {
3446 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3447 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3448 }
3449 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3450 {
3451 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3452 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3453 }
3454
3455 /* Is it there? */
3456 /** @todo testcase: Is this checked before the canonical / limit check below? */
3457 if (!pDesc->Legacy.Gen.u1Present)
3458 {
3459 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3460 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3461 }
3462
3463 return VINF_SUCCESS;
3464}
3465
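/*
 * Illustrative sketch, hypothetical caller (not part of the build): a stack
 * switch to an inner ring would fetch SS:ESP from the TSS and then run the new
 * SS through the validation above before committing it:
 *
 *     IEMSELDESC DescSS;
 *     VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;    // #TS/#NP style faults are raised inside the helper
 *     // ... set the accessed bit and load the hidden SS parts from DescSS ...
 */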
3466
3467/**
3468 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3469 * not (kind of obsolete now).
3470 *
3471 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3472 */
3473#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3474
3475/**
3476 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
3477 *
3478 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3479 * @param a_fEfl The new EFLAGS.
3480 */
3481#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3482
3483/** @} */
3484
3485
3486/** @name Raising Exceptions.
3487 *
3488 * @{
3489 */
3490
3491
3492/**
3493 * Loads the specified stack far pointer from the TSS.
3494 *
3495 * @returns VBox strict status code.
3496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3497 * @param uCpl The CPL to load the stack for.
3498 * @param pSelSS Where to return the new stack segment.
3499 * @param puEsp Where to return the new stack pointer.
3500 */
3501IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3502{
3503 VBOXSTRICTRC rcStrict;
3504 Assert(uCpl < 4);
3505
3506 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3507 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3508 {
3509 /*
3510 * 16-bit TSS (X86TSS16).
3511 */
3512 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3513 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3514 {
3515 uint32_t off = uCpl * 4 + 2;
3516 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3517 {
3518 /** @todo check actual access pattern here. */
3519 uint32_t u32Tmp = 0; /* gcc maybe... */
3520 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3521 if (rcStrict == VINF_SUCCESS)
3522 {
3523 *puEsp = RT_LOWORD(u32Tmp);
3524 *pSelSS = RT_HIWORD(u32Tmp);
3525 return VINF_SUCCESS;
3526 }
3527 }
3528 else
3529 {
3530 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3531 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3532 }
3533 break;
3534 }
3535
3536 /*
3537 * 32-bit TSS (X86TSS32).
3538 */
3539 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3540 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3541 {
3542 uint32_t off = uCpl * 8 + 4;
3543 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3544 {
3545/** @todo check actual access pattern here. */
3546 uint64_t u64Tmp;
3547 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3548 if (rcStrict == VINF_SUCCESS)
3549 {
3550 *puEsp = u64Tmp & UINT32_MAX;
3551 *pSelSS = (RTSEL)(u64Tmp >> 32);
3552 return VINF_SUCCESS;
3553 }
3554 }
3555 else
3556 {
3557 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3558 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3559 }
3560 break;
3561 }
3562
3563 default:
3564 AssertFailed();
3565 rcStrict = VERR_IEM_IPE_4;
3566 break;
3567 }
3568
3569 *puEsp = 0; /* make gcc happy */
3570 *pSelSS = 0; /* make gcc happy */
3571 return rcStrict;
3572}
3573
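/*
 * Illustrative numbers, assuming the standard X86TSS16/X86TSS32 layouts: the
 * per-ring stack slots follow right after the previous task link, so for
 * uCpl=1 the offsets above work out to:
 *
 *     16-bit TSS:  off = 1*4 + 2 = 6     -> sp1 at 6,     ss1 at 8
 *     32-bit TSS:  off = 1*8 + 4 = 0x0c  -> esp1 at 0x0c, ss1 at 0x10
 *
 * which is why one 32-bit (resp. 64-bit) system read at 'off' yields the stack
 * pointer in the low half and the SS selector in the high half.
 */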
3574
3575/**
3576 * Loads the specified stack pointer from the 64-bit TSS.
3577 *
3578 * @returns VBox strict status code.
3579 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3580 * @param uCpl The CPL to load the stack for.
3581 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3582 * @param puRsp Where to return the new stack pointer.
3583 */
3584IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3585{
3586 Assert(uCpl < 4);
3587 Assert(uIst < 8);
3588 *puRsp = 0; /* make gcc happy */
3589
3590 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3591 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3592
3593 uint32_t off;
3594 if (uIst)
3595 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3596 else
3597 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3598 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3599 {
3600 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3601 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3602 }
3603
3604 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3605}
3606
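/*
 * Illustrative numbers, assuming the standard X86TSS64 layout (rsp0 at offset
 * 0x04, ist1 at offset 0x24): the offset calculation above resolves to e.g.
 *
 *     uCpl=0, uIst=0:  off = RT_UOFFSETOF(X86TSS64, rsp0)           = 0x04
 *     uCpl=3, uIst=0:  off = 3*8 + RT_UOFFSETOF(X86TSS64, rsp0)     = 0x1c
 *     uIst=3:          off = (3-1)*8 + RT_UOFFSETOF(X86TSS64, ist1) = 0x34
 *
 * so a single 8-byte fetch returns the new RSP directly; there is no SS to
 * load from the 64-bit TSS.
 */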
3607
3608/**
3609 * Adjust the CPU state according to the exception being raised.
3610 *
3611 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3612 * @param u8Vector The exception that has been raised.
3613 */
3614DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
3615{
3616 switch (u8Vector)
3617 {
3618 case X86_XCPT_DB:
3619 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3620 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3621 break;
3622 /** @todo Read the AMD and Intel exception reference... */
3623 }
3624}
3625
3626
3627/**
3628 * Implements exceptions and interrupts for real mode.
3629 *
3630 * @returns VBox strict status code.
3631 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3632 * @param cbInstr The number of bytes to offset rIP by in the return
3633 * address.
3634 * @param u8Vector The interrupt / exception vector number.
3635 * @param fFlags The flags.
3636 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3637 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3638 */
3639IEM_STATIC VBOXSTRICTRC
3640iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
3641 uint8_t cbInstr,
3642 uint8_t u8Vector,
3643 uint32_t fFlags,
3644 uint16_t uErr,
3645 uint64_t uCr2)
3646{
3647 NOREF(uErr); NOREF(uCr2);
3648 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3649
3650 /*
3651 * Read the IDT entry.
3652 */
3653 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3654 {
3655 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3656 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3657 }
3658 RTFAR16 Idte;
3659 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3660 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3661 {
3662 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3663 return rcStrict;
3664 }
3665
3666 /*
3667 * Push the stack frame.
3668 */
3669 uint16_t *pu16Frame;
3670 uint64_t uNewRsp;
3671 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3672 if (rcStrict != VINF_SUCCESS)
3673 return rcStrict;
3674
3675 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3676#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3677 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3678 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3679 fEfl |= UINT16_C(0xf000);
3680#endif
3681 pu16Frame[2] = (uint16_t)fEfl;
3682 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3683 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3684 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3685 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3686 return rcStrict;
3687
3688 /*
3689 * Load the vector address into cs:ip and make exception specific state
3690 * adjustments.
3691 */
3692 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3693 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3694 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3695 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3696 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3697 pVCpu->cpum.GstCtx.rip = Idte.off;
3698 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3699 IEMMISC_SET_EFL(pVCpu, fEfl);
3700
3701 /** @todo do we actually do this in real mode? */
3702 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3703 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3704
3705 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3706}
3707
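/*
 * Illustrative layout for the real-mode path above: the IDT entry is just a
 * 16:16 far pointer at linear address u8Vector * 4, and the three word pushes
 * build the classic IRET frame:
 *
 *     IVT[u8Vector]:   +0  offset (IP),  +2  segment (CS)
 *
 *     new SS:SP ->     +0  return IP  (ip, or ip + cbInstr for INTn/INT3/INTO)
 *                      +2  return CS
 *                      +4  FLAGS      (IF/TF/AC are cleared in the live EFLAGS
 *                                      afterwards, not in the pushed image)
 *
 * after which execution resumes at Idte.sel:Idte.off with CS.base = sel << 4.
 */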
3708
3709/**
3710 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3711 *
3712 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3713 * @param pSReg Pointer to the segment register.
3714 */
3715IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
3716{
3717 pSReg->Sel = 0;
3718 pSReg->ValidSel = 0;
3719 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3720 {
3721 /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3722 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3723 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3724 }
3725 else
3726 {
3727 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3728 /** @todo check this on AMD-V */
3729 pSReg->u64Base = 0;
3730 pSReg->u32Limit = 0;
3731 }
3732}
3733
3734
3735/**
3736 * Loads a segment selector during a task switch in V8086 mode.
3737 *
3738 * @param pSReg Pointer to the segment register.
3739 * @param uSel The selector value to load.
3740 */
3741IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3742{
3743 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3744 pSReg->Sel = uSel;
3745 pSReg->ValidSel = uSel;
3746 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3747 pSReg->u64Base = uSel << 4;
3748 pSReg->u32Limit = 0xffff;
3749 pSReg->Attr.u = 0xf3;
3750}
3751
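/*
 * Illustrative numbers for the V8086 load above: the hidden parts follow the
 * real-mode rules, so e.g. uSel=0x1234 gives
 *
 *     base  = 0x1234 << 4 = 0x12340
 *     limit = 0xffff
 *     attr  = 0xf3   (present, DPL=3, data, read/write, accessed)
 *
 * matching the guest segment register checks referenced in Intel spec.
 * 26.3.1.2.
 */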
3752
3753/**
3754 * Loads a NULL data selector into a selector register, both the hidden and
3755 * visible parts, in protected mode.
3756 *
3757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3758 * @param pSReg Pointer to the segment register.
3759 * @param uRpl The RPL.
3760 */
3761IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3762{
3763 /** @todo Testcase: write a testcase checking what happens when loading a NULL
3764 * data selector in protected mode. */
3765 pSReg->Sel = uRpl;
3766 pSReg->ValidSel = uRpl;
3767 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3768 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3769 {
3770 /* VT-x (Intel 3960x) observed doing something like this. */
3771 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3772 pSReg->u32Limit = UINT32_MAX;
3773 pSReg->u64Base = 0;
3774 }
3775 else
3776 {
3777 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3778 pSReg->u32Limit = 0;
3779 pSReg->u64Base = 0;
3780 }
3781}
3782
3783
3784/**
3785 * Loads a segment selector during a task switch in protected mode.
3786 *
3787 * In this task switch scenario, we would throw \#TS exceptions rather than
3788 * \#GPs.
3789 *
3790 * @returns VBox strict status code.
3791 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3792 * @param pSReg Pointer to the segment register.
3793 * @param uSel The new selector value.
3794 *
3795 * @remarks This does _not_ handle CS or SS.
3796 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3797 */
3798IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3799{
3800 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3801
3802 /* Null data selector. */
3803 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3804 {
3805 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3806 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3807 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3808 return VINF_SUCCESS;
3809 }
3810
3811 /* Fetch the descriptor. */
3812 IEMSELDESC Desc;
3813 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3814 if (rcStrict != VINF_SUCCESS)
3815 {
3816 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3817 VBOXSTRICTRC_VAL(rcStrict)));
3818 return rcStrict;
3819 }
3820
3821 /* Must be a data segment or readable code segment. */
3822 if ( !Desc.Legacy.Gen.u1DescType
3823 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3824 {
3825 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3826 Desc.Legacy.Gen.u4Type));
3827 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3828 }
3829
3830 /* Check privileges for data segments and non-conforming code segments. */
3831 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3832 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3833 {
3834 /* The RPL and the new CPL must be less than or equal to the DPL. */
3835 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3836 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3837 {
3838 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3839 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3840 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3841 }
3842 }
3843
3844 /* Is it there? */
3845 if (!Desc.Legacy.Gen.u1Present)
3846 {
3847 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3848 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3849 }
3850
3851 /* The base and limit. */
3852 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3853 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3854
3855 /*
3856 * Ok, everything checked out fine. Now set the accessed bit before
3857 * committing the result into the registers.
3858 */
3859 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3860 {
3861 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3862 if (rcStrict != VINF_SUCCESS)
3863 return rcStrict;
3864 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3865 }
3866
3867 /* Commit */
3868 pSReg->Sel = uSel;
3869 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3870 pSReg->u32Limit = cbLimit;
3871 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3872 pSReg->ValidSel = uSel;
3873 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3874 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3875 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3876
3877 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3878 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3879 return VINF_SUCCESS;
3880}
3881
3882
3883/**
3884 * Performs a task switch.
3885 *
3886 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3887 * caller is responsible for performing the necessary checks (like DPL, TSS
3888 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3889 * reference for JMP, CALL, IRET.
3890 *
3891 * If the task switch is due to a software interrupt or hardware exception,
3892 * the caller is responsible for validating the TSS selector and descriptor. See
3893 * Intel Instruction reference for INT n.
3894 *
3895 * @returns VBox strict status code.
3896 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3897 * @param enmTaskSwitch The cause of the task switch.
3898 * @param uNextEip The EIP effective after the task switch.
3899 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3900 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3901 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3902 * @param SelTSS The TSS selector of the new task.
3903 * @param pNewDescTSS Pointer to the new TSS descriptor.
3904 */
3905IEM_STATIC VBOXSTRICTRC
3906iemTaskSwitch(PVMCPUCC pVCpu,
3907 IEMTASKSWITCH enmTaskSwitch,
3908 uint32_t uNextEip,
3909 uint32_t fFlags,
3910 uint16_t uErr,
3911 uint64_t uCr2,
3912 RTSEL SelTSS,
3913 PIEMSELDESC pNewDescTSS)
3914{
3915 Assert(!IEM_IS_REAL_MODE(pVCpu));
3916 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3917 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3918
3919 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3920 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3921 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3922 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3923 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3924
3925 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3926 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3927
3928 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3929 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3930
3931 /* Update CR2 in case it's a page-fault. */
3932 /** @todo This should probably be done much earlier in IEM/PGM. See
3933 * @bugref{5653#c49}. */
3934 if (fFlags & IEM_XCPT_FLAGS_CR2)
3935 pVCpu->cpum.GstCtx.cr2 = uCr2;
3936
3937 /*
3938 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3939 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3940 */
3941 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3942 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3943 if (uNewTSSLimit < uNewTSSLimitMin)
3944 {
3945 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3946 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3947 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3948 }
3949
3950 /*
3951 * Task switches in VMX non-root mode always cause a task-switch VM-exit.
3952 * The new TSS must have been read and validated (DPL, limits etc.) before a
3953 * task-switch VM-exit commences.
3954 *
3955 * See Intel spec. 25.4.2 "Treatment of Task Switches".
3956 */
3957 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3958 {
3959 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
3960 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
3961 }
3962
3963 /*
3964 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3965 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3966 */
3967 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3968 {
3969 uint32_t const uExitInfo1 = SelTSS;
3970 uint32_t uExitInfo2 = uErr;
3971 switch (enmTaskSwitch)
3972 {
3973 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3974 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3975 default: break;
3976 }
3977 if (fFlags & IEM_XCPT_FLAGS_ERR)
3978 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3979 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
3980 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3981
3982 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3983 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3984 RT_NOREF2(uExitInfo1, uExitInfo2);
3985 }
3986
3987 /*
3988 * Check the current TSS limit. The last written byte to the current TSS during the
3989 * task switch will be 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
3990 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
3991 *
3992 * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
3993 * end up with smaller than "legal" TSS limits.
3994 */
3995 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
3996 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
3997 if (uCurTSSLimit < uCurTSSLimitMin)
3998 {
3999 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4000 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4001 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4002 }
4003
4004 /*
4005 * Verify that the new TSS can be accessed and map it. Map only the required contents
4006 * and not the entire TSS.
4007 */
4008 void *pvNewTSS;
4009 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
4010 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4011 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4012 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4013 * not perform correct translation if this happens. See Intel spec. 7.2.1
4014 * "Task-State Segment". */
4015 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4016 if (rcStrict != VINF_SUCCESS)
4017 {
4018 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4019 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4020 return rcStrict;
4021 }
4022
4023 /*
4024 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4025 */
4026 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4027 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4028 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4029 {
4030 PX86DESC pDescCurTSS;
4031 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4032 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4033 if (rcStrict != VINF_SUCCESS)
4034 {
4035 Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4036 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4037 return rcStrict;
4038 }
4039
4040 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4041 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4042 if (rcStrict != VINF_SUCCESS)
4043 {
4044 Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4045 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4046 return rcStrict;
4047 }
4048
4049 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4050 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4051 {
4052 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4053 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4054 u32EFlags &= ~X86_EFL_NT;
4055 }
4056 }
4057
4058 /*
4059 * Save the CPU state into the current TSS.
4060 */
4061 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4062 if (GCPtrNewTSS == GCPtrCurTSS)
4063 {
4064 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4065 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4066 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4067 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4068 pVCpu->cpum.GstCtx.ldtr.Sel));
4069 }
4070 if (fIsNewTSS386)
4071 {
4072 /*
4073 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4074 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4075 */
4076 void *pvCurTSS32;
4077 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4078 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4079 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4080 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4081 if (rcStrict != VINF_SUCCESS)
4082 {
4083 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4084 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4085 return rcStrict;
4086 }
4087
4088 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4089 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4090 pCurTSS32->eip = uNextEip;
4091 pCurTSS32->eflags = u32EFlags;
4092 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4093 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4094 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4095 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4096 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4097 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4098 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4099 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4100 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4101 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4102 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4103 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4104 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4105 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4106
4107 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4108 if (rcStrict != VINF_SUCCESS)
4109 {
4110 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4111 VBOXSTRICTRC_VAL(rcStrict)));
4112 return rcStrict;
4113 }
4114 }
4115 else
4116 {
4117 /*
4118 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4119 */
4120 void *pvCurTSS16;
4121 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4122 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4123 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4124 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4125 if (rcStrict != VINF_SUCCESS)
4126 {
4127 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4128 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4129 return rcStrict;
4130 }
4131
4132 /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4133 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4134 pCurTSS16->ip = uNextEip;
4135 pCurTSS16->flags = u32EFlags;
4136 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4137 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4138 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4139 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4140 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4141 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4142 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4143 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4144 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4145 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4146 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4147 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4148
4149 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4150 if (rcStrict != VINF_SUCCESS)
4151 {
4152 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4153 VBOXSTRICTRC_VAL(rcStrict)));
4154 return rcStrict;
4155 }
4156 }
4157
4158 /*
4159 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4160 */
4161 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4162 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4163 {
4164 /* Whether it's a 16-bit or 32-bit TSS doesn't matter, we only access the first, common 16-bit field (selPrev) here. */
4165 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4166 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4167 }
4168
4169 /*
4170 * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky,
4171 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4172 */
4173 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4174 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4175 bool fNewDebugTrap;
4176 if (fIsNewTSS386)
4177 {
4178 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
4179 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4180 uNewEip = pNewTSS32->eip;
4181 uNewEflags = pNewTSS32->eflags;
4182 uNewEax = pNewTSS32->eax;
4183 uNewEcx = pNewTSS32->ecx;
4184 uNewEdx = pNewTSS32->edx;
4185 uNewEbx = pNewTSS32->ebx;
4186 uNewEsp = pNewTSS32->esp;
4187 uNewEbp = pNewTSS32->ebp;
4188 uNewEsi = pNewTSS32->esi;
4189 uNewEdi = pNewTSS32->edi;
4190 uNewES = pNewTSS32->es;
4191 uNewCS = pNewTSS32->cs;
4192 uNewSS = pNewTSS32->ss;
4193 uNewDS = pNewTSS32->ds;
4194 uNewFS = pNewTSS32->fs;
4195 uNewGS = pNewTSS32->gs;
4196 uNewLdt = pNewTSS32->selLdt;
4197 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4198 }
4199 else
4200 {
4201 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
4202 uNewCr3 = 0;
4203 uNewEip = pNewTSS16->ip;
4204 uNewEflags = pNewTSS16->flags;
4205 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4206 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4207 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4208 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4209 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4210 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4211 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4212 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4213 uNewES = pNewTSS16->es;
4214 uNewCS = pNewTSS16->cs;
4215 uNewSS = pNewTSS16->ss;
4216 uNewDS = pNewTSS16->ds;
4217 uNewFS = 0;
4218 uNewGS = 0;
4219 uNewLdt = pNewTSS16->selLdt;
4220 fNewDebugTrap = false;
4221 }
4222
4223 if (GCPtrNewTSS == GCPtrCurTSS)
4224 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4225 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4226
4227 /*
4228 * We're done accessing the new TSS.
4229 */
4230 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4231 if (rcStrict != VINF_SUCCESS)
4232 {
4233 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4234 return rcStrict;
4235 }
4236
4237 /*
4238 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4239 */
4240 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4241 {
4242 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4243 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4244 if (rcStrict != VINF_SUCCESS)
4245 {
4246 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4247 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4248 return rcStrict;
4249 }
4250
4251 /* Check that the descriptor indicates the new TSS is available (not busy). */
4252 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4253 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4254 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4255
4256 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4257 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4258 if (rcStrict != VINF_SUCCESS)
4259 {
4260 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4261 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4262 return rcStrict;
4263 }
4264 }
4265
4266 /*
4267 * From this point on, we're technically in the new task. We will defer exceptions
4268 * until the completion of the task switch but before executing any instructions in the new task.
4269 */
4270 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4271 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4272 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4273 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4274 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4275 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4276 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4277
4278 /* Set the busy bit in TR. */
4279 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4280
4281 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4282 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4283 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4284 {
4285 uNewEflags |= X86_EFL_NT;
4286 }
4287
4288 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4289 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4290 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4291
4292 pVCpu->cpum.GstCtx.eip = uNewEip;
4293 pVCpu->cpum.GstCtx.eax = uNewEax;
4294 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4295 pVCpu->cpum.GstCtx.edx = uNewEdx;
4296 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4297 pVCpu->cpum.GstCtx.esp = uNewEsp;
4298 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4299 pVCpu->cpum.GstCtx.esi = uNewEsi;
4300 pVCpu->cpum.GstCtx.edi = uNewEdi;
4301
4302 uNewEflags &= X86_EFL_LIVE_MASK;
4303 uNewEflags |= X86_EFL_RA1_MASK;
4304 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4305
4306 /*
4307 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4308 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4309 * due to the hidden part data originating from the guest LDT/GDT which is accessed through paging.
4310 */
4311 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4312 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4313
4314 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4315 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4316
4317 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4318 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4319
4320 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4321 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4322
4323 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4324 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4325
4326 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4327 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4328 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4329
4330 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4331 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4332 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4333 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4334
4335 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4336 {
4337 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4338 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4339 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4340 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4341 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4342 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4343 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4344 }
4345
4346 /*
4347 * Switch CR3 for the new task.
4348 */
4349 if ( fIsNewTSS386
4350 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4351 {
4352 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4353 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4354 AssertRCSuccessReturn(rc, rc);
4355
4356 /* Inform PGM. */
4357 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
4358 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE), false /* fPdpesMapped */);
4359 AssertRCReturn(rc, rc);
4360 /* ignore informational status codes */
4361
4362 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4363 }
4364
4365 /*
4366 * Switch LDTR for the new task.
4367 */
4368 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4369 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4370 else
4371 {
4372 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4373
4374 IEMSELDESC DescNewLdt;
4375 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4376 if (rcStrict != VINF_SUCCESS)
4377 {
4378 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4379 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4380 return rcStrict;
4381 }
4382 if ( !DescNewLdt.Legacy.Gen.u1Present
4383 || DescNewLdt.Legacy.Gen.u1DescType
4384 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4385 {
4386 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4387 uNewLdt, DescNewLdt.Legacy.u));
4388 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4389 }
4390
4391 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4392 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4393 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4394 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4395 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4396 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4397 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4398 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4399 }
4400
4401 IEMSELDESC DescSS;
4402 if (IEM_IS_V86_MODE(pVCpu))
4403 {
4404 pVCpu->iem.s.uCpl = 3;
4405 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4406 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4407 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4408 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4409 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4410 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4411
4412 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
4413 DescSS.Legacy.u = 0;
4414 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4415 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4416 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4417 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4418 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4419 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4420 DescSS.Legacy.Gen.u2Dpl = 3;
4421 }
4422 else
4423 {
4424 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
4425
4426 /*
4427 * Load the stack segment for the new task.
4428 */
4429 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4430 {
4431 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4432 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4433 }
4434
4435 /* Fetch the descriptor. */
4436 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4437 if (rcStrict != VINF_SUCCESS)
4438 {
4439 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4440 VBOXSTRICTRC_VAL(rcStrict)));
4441 return rcStrict;
4442 }
4443
4444 /* SS must be a data segment and writable. */
4445 if ( !DescSS.Legacy.Gen.u1DescType
4446 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4447 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4448 {
4449 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4450 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4451 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4452 }
4453
4454 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4455 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4456 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4457 {
4458 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4459 uNewCpl));
4460 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4461 }
4462
4463 /* Is it there? */
4464 if (!DescSS.Legacy.Gen.u1Present)
4465 {
4466 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4467 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4468 }
4469
4470 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4471 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4472
4473 /* Set the accessed bit before committing the result into SS. */
4474 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4475 {
4476 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4477 if (rcStrict != VINF_SUCCESS)
4478 return rcStrict;
4479 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4480 }
4481
4482 /* Commit SS. */
4483 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4484 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4485 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4486 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4487 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4488 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4489 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4490
4491 /* CPL has changed, update IEM before loading rest of segments. */
4492 pVCpu->iem.s.uCpl = uNewCpl;
4493
4494 /*
4495 * Load the data segments for the new task.
4496 */
4497 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4498 if (rcStrict != VINF_SUCCESS)
4499 return rcStrict;
4500 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4501 if (rcStrict != VINF_SUCCESS)
4502 return rcStrict;
4503 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4504 if (rcStrict != VINF_SUCCESS)
4505 return rcStrict;
4506 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4507 if (rcStrict != VINF_SUCCESS)
4508 return rcStrict;
4509
4510 /*
4511 * Load the code segment for the new task.
4512 */
4513 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4514 {
4515 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4516 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4517 }
4518
4519 /* Fetch the descriptor. */
4520 IEMSELDESC DescCS;
4521 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4522 if (rcStrict != VINF_SUCCESS)
4523 {
4524 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4525 return rcStrict;
4526 }
4527
4528 /* CS must be a code segment. */
4529 if ( !DescCS.Legacy.Gen.u1DescType
4530 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4531 {
4532 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4533 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4534 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4535 }
4536
4537 /* For conforming CS, DPL must be less than or equal to the RPL. */
4538 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4539 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4540 {
4541        Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4542 DescCS.Legacy.Gen.u2Dpl));
4543 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4544 }
4545
4546 /* For non-conforming CS, DPL must match RPL. */
4547 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4548 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4549 {
4550        Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4551 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4552 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4553 }
4554
4555 /* Is it there? */
4556 if (!DescCS.Legacy.Gen.u1Present)
4557 {
4558 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4559 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4560 }
4561
4562 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4563 u64Base = X86DESC_BASE(&DescCS.Legacy);
4564
4565 /* Set the accessed bit before committing the result into CS. */
4566 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4567 {
4568 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4569 if (rcStrict != VINF_SUCCESS)
4570 return rcStrict;
4571 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4572 }
4573
4574 /* Commit CS. */
4575 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4576 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4577 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4578 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4579 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4580 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4581 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4582 }
4583
4584 /** @todo Debug trap. */
4585 if (fIsNewTSS386 && fNewDebugTrap)
4586 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4587
4588 /*
4589 * Construct the error code masks based on what caused this task switch.
4590 * See Intel Instruction reference for INT.
4591 */
4592 uint16_t uExt;
4593 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4594 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4595 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4596 {
4597 uExt = 1;
4598 }
4599 else
4600 uExt = 0;
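
        /* A rough sketch of the selector error code layout these faults use (the bit
           names below are descriptive only, not identifiers from this file):
               bit 0       EXT - event external to the faulting code (the uExt value above),
               bit 1       IDT - the selector index refers to the IDT,
               bit 2       TI  - GDT (0) or LDT (1),
               bits 3..15  the selector index.
           So a #TS caused by selector 0x28 during external event delivery would carry
           (0x28 & X86_SEL_MASK_OFF_RPL) | 1 = 0x29 as its error code. */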
4601
4602 /*
4603 * Push any error code on to the new stack.
4604 */
4605 if (fFlags & IEM_XCPT_FLAGS_ERR)
4606 {
4607 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4608 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4609 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4610
4611 /* Check that there is sufficient space on the stack. */
4612 /** @todo Factor out segment limit checking for normal/expand down segments
4613 * into a separate function. */
4614 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4615 {
4616 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4617 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4618 {
4619 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4620 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4621 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4622 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4623 }
4624 }
4625 else
4626 {
4627 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4628 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4629 {
4630 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4631 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4632 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4633 }
4634 }
4635
4636
4637 if (fIsNewTSS386)
4638 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4639 else
4640 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4641 if (rcStrict != VINF_SUCCESS)
4642 {
4643 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4644 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4645 return rcStrict;
4646 }
4647 }
4648
4649 /* Check the new EIP against the new CS limit. */
4650 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4651 {
4652        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4653 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4654 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4655 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4656 }
4657
4658 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4659 pVCpu->cpum.GstCtx.ss.Sel));
4660 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4661}
4662
4663
4664/**
4665 * Implements exceptions and interrupts for protected mode.
4666 *
4667 * @returns VBox strict status code.
4668 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4669 * @param cbInstr The number of bytes to offset rIP by in the return
4670 * address.
4671 * @param u8Vector The interrupt / exception vector number.
4672 * @param fFlags The flags.
4673 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4674 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4675 */
4676IEM_STATIC VBOXSTRICTRC
4677iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
4678 uint8_t cbInstr,
4679 uint8_t u8Vector,
4680 uint32_t fFlags,
4681 uint16_t uErr,
4682 uint64_t uCr2)
4683{
4684 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4685
4686 /*
4687 * Read the IDT entry.
4688 */
4689 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4690 {
4691 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4692 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4693 }
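        /* A small worked example of the bounds check above (just the arithmetic, nothing
           IEM specific): protected-mode IDT entries are 8 bytes, so vector 0x0e (#PF) lives
           at bytes 0x70..0x77 of the IDT.  Since the limit is the offset of the last valid
           byte, cbIdt must be at least 8 * 0x0e + 7 = 0x77 or the #GP above is raised. */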
4694 X86DESC Idte;
4695 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4696 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4697 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4698 {
4699 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4700 return rcStrict;
4701 }
4702 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4703 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4704 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4705
4706 /*
4707 * Check the descriptor type, DPL and such.
4708 * ASSUMES this is done in the same order as described for call-gate calls.
4709 */
4710 if (Idte.Gate.u1DescType)
4711 {
4712 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4713 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4714 }
4715 bool fTaskGate = false;
4716 uint8_t f32BitGate = true;
4717 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4718 switch (Idte.Gate.u4Type)
4719 {
4720 case X86_SEL_TYPE_SYS_UNDEFINED:
4721 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4722 case X86_SEL_TYPE_SYS_LDT:
4723 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4724 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4725 case X86_SEL_TYPE_SYS_UNDEFINED2:
4726 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4727 case X86_SEL_TYPE_SYS_UNDEFINED3:
4728 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4729 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4730 case X86_SEL_TYPE_SYS_UNDEFINED4:
4731 {
4732 /** @todo check what actually happens when the type is wrong...
4733 * esp. call gates. */
4734 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4735 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4736 }
4737
4738 case X86_SEL_TYPE_SYS_286_INT_GATE:
4739 f32BitGate = false;
4740 RT_FALL_THRU();
4741 case X86_SEL_TYPE_SYS_386_INT_GATE:
4742 fEflToClear |= X86_EFL_IF;
4743 break;
4744
4745 case X86_SEL_TYPE_SYS_TASK_GATE:
4746 fTaskGate = true;
4747#ifndef IEM_IMPLEMENTS_TASKSWITCH
4748 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4749#endif
4750 break;
4751
4752 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4753            f32BitGate = false;
                RT_FALL_THRU();
4754 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4755 break;
4756
4757 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4758 }
4759
4760 /* Check DPL against CPL if applicable. */
4761 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4762 {
4763 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4764 {
4765 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4766 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4767 }
4768 }
4769
4770 /* Is it there? */
4771 if (!Idte.Gate.u1Present)
4772 {
4773 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4774 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4775 }
4776
4777 /* Is it a task-gate? */
4778 if (fTaskGate)
4779 {
4780 /*
4781 * Construct the error code masks based on what caused this task switch.
4782 * See Intel Instruction reference for INT.
4783 */
4784 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4785 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4786 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4787 RTSEL SelTSS = Idte.Gate.u16Sel;
4788
4789 /*
4790 * Fetch the TSS descriptor in the GDT.
4791 */
4792 IEMSELDESC DescTSS;
4793 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4794 if (rcStrict != VINF_SUCCESS)
4795 {
4796 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4797 VBOXSTRICTRC_VAL(rcStrict)));
4798 return rcStrict;
4799 }
4800
4801 /* The TSS descriptor must be a system segment and be available (not busy). */
4802 if ( DescTSS.Legacy.Gen.u1DescType
4803 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4804 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4805 {
4806 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4807 u8Vector, SelTSS, DescTSS.Legacy.au64));
4808 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4809 }
4810
4811 /* The TSS must be present. */
4812 if (!DescTSS.Legacy.Gen.u1Present)
4813 {
4814 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4815 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4816 }
4817
4818 /* Do the actual task switch. */
4819 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4820 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4821 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4822 }
4823
4824 /* A null CS is bad. */
4825 RTSEL NewCS = Idte.Gate.u16Sel;
4826 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4827 {
4828 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4829 return iemRaiseGeneralProtectionFault0(pVCpu);
4830 }
4831
4832 /* Fetch the descriptor for the new CS. */
4833 IEMSELDESC DescCS;
4834 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4835 if (rcStrict != VINF_SUCCESS)
4836 {
4837 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4838 return rcStrict;
4839 }
4840
4841 /* Must be a code segment. */
4842 if (!DescCS.Legacy.Gen.u1DescType)
4843 {
4844 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4845 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4846 }
4847 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4848 {
4849 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4850 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4851 }
4852
4853 /* Don't allow lowering the privilege level. */
4854 /** @todo Does the lowering of privileges apply to software interrupts
4855 * only? This has bearings on the more-privileged or
4856 * same-privilege stack behavior further down. A testcase would
4857 * be nice. */
4858 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4859 {
4860 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4861 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4862 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4863 }
4864
4865 /* Make sure the selector is present. */
4866 if (!DescCS.Legacy.Gen.u1Present)
4867 {
4868 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4869 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4870 }
4871
4872 /* Check the new EIP against the new CS limit. */
4873 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4874 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4875 ? Idte.Gate.u16OffsetLow
4876 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4877 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4878 if (uNewEip > cbLimitCS)
4879 {
4880 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4881 u8Vector, uNewEip, cbLimitCS, NewCS));
4882 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4883 }
4884 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4885
4886 /* Calc the flag image to push. */
4887 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4888 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4889 fEfl &= ~X86_EFL_RF;
4890 else
4891 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4892
4893    /* From V8086 mode we can only go to CPL 0. */
4894 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4895 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4896 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4897 {
4898 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4899 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4900 }
4901
4902 /*
4903 * If the privilege level changes, we need to get a new stack from the TSS.
4904 * This in turns means validating the new SS and ESP...
4905 */
4906 if (uNewCpl != pVCpu->iem.s.uCpl)
4907 {
4908 RTSEL NewSS;
4909 uint32_t uNewEsp;
4910 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4911 if (rcStrict != VINF_SUCCESS)
4912 return rcStrict;
4913
4914 IEMSELDESC DescSS;
4915 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4916 if (rcStrict != VINF_SUCCESS)
4917 return rcStrict;
4918 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4919 if (!DescSS.Legacy.Gen.u1DefBig)
4920 {
4921 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4922 uNewEsp = (uint16_t)uNewEsp;
4923 }
4924
4925 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4926
4927 /* Check that there is sufficient space for the stack frame. */
4928 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4929 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4930 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4931 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
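            /* What the cbStackFrame expression above works out to, in bytes (a sketch of
               the arithmetic only; the actual pushes follow below):
                   ring transition, 16-bit gate:  IP, CS, FLAGS, SP, SS          -> 10 (+2 w/ error code)
                   ring transition, 32-bit gate:  EIP, CS, EFLAGS, ESP, SS       -> 20 (+4 w/ error code)
                   from V8086, 16-bit gate:       the above plus ES, DS, FS, GS  -> 18 (+2)
                   from V8086, 32-bit gate:       the above plus ES, DS, FS, GS  -> 36 (+4)
               The '<< f32BitGate' simply doubles the 16-bit figures for 32-bit gates. */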
4932
4933 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4934 {
4935 if ( uNewEsp - 1 > cbLimitSS
4936 || uNewEsp < cbStackFrame)
4937 {
4938 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4939 u8Vector, NewSS, uNewEsp, cbStackFrame));
4940 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4941 }
4942 }
4943 else
4944 {
4945 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4946 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4947 {
4948 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4949 u8Vector, NewSS, uNewEsp, cbStackFrame));
4950 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4951 }
4952 }
4953
4954 /*
4955 * Start making changes.
4956 */
4957
4958 /* Set the new CPL so that stack accesses use it. */
4959 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4960 pVCpu->iem.s.uCpl = uNewCpl;
4961
4962 /* Create the stack frame. */
4963 RTPTRUNION uStackFrame;
4964 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4965 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4966 if (rcStrict != VINF_SUCCESS)
4967 return rcStrict;
4968 void * const pvStackFrame = uStackFrame.pv;
4969 if (f32BitGate)
4970 {
4971 if (fFlags & IEM_XCPT_FLAGS_ERR)
4972 *uStackFrame.pu32++ = uErr;
4973 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4974 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4975 uStackFrame.pu32[2] = fEfl;
4976 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4977 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4978 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4979 if (fEfl & X86_EFL_VM)
4980 {
4981 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4982 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4983 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4984 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
4985 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
4986 }
4987 }
4988 else
4989 {
4990 if (fFlags & IEM_XCPT_FLAGS_ERR)
4991 *uStackFrame.pu16++ = uErr;
4992 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
4993 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4994 uStackFrame.pu16[2] = fEfl;
4995 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
4996 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
4997 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
4998 if (fEfl & X86_EFL_VM)
4999 {
5000 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5001 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5002 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5003 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5004 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5005 }
5006 }
5007 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5008 if (rcStrict != VINF_SUCCESS)
5009 return rcStrict;
5010
5011 /* Mark the selectors 'accessed' (hope this is the correct time). */
5012    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5013 * after pushing the stack frame? (Write protect the gdt + stack to
5014 * find out.) */
5015 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5016 {
5017 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5018 if (rcStrict != VINF_SUCCESS)
5019 return rcStrict;
5020 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5021 }
5022
5023 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5024 {
5025 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5026 if (rcStrict != VINF_SUCCESS)
5027 return rcStrict;
5028 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5029 }
5030
5031 /*
5032     * Start committing the register changes (joins with the DPL=CPL branch).
5033 */
5034 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5035 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5036 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5037 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5038 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5039 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5040 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5041 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5042 * SP is loaded).
5043 * Need to check the other combinations too:
5044 * - 16-bit TSS, 32-bit handler
5045 * - 32-bit TSS, 16-bit handler */
5046 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5047 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5048 else
5049 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5050
5051 if (fEfl & X86_EFL_VM)
5052 {
5053 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5054 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5055 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5056 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5057 }
5058 }
5059 /*
5060 * Same privilege, no stack change and smaller stack frame.
5061 */
5062 else
5063 {
5064 uint64_t uNewRsp;
5065 RTPTRUNION uStackFrame;
5066 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
5067 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5068 if (rcStrict != VINF_SUCCESS)
5069 return rcStrict;
5070 void * const pvStackFrame = uStackFrame.pv;
5071
5072 if (f32BitGate)
5073 {
5074 if (fFlags & IEM_XCPT_FLAGS_ERR)
5075 *uStackFrame.pu32++ = uErr;
5076 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5077 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5078 uStackFrame.pu32[2] = fEfl;
5079 }
5080 else
5081 {
5082 if (fFlags & IEM_XCPT_FLAGS_ERR)
5083 *uStackFrame.pu16++ = uErr;
5084 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5085 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5086 uStackFrame.pu16[2] = fEfl;
5087 }
5088 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5089 if (rcStrict != VINF_SUCCESS)
5090 return rcStrict;
5091
5092 /* Mark the CS selector as 'accessed'. */
5093 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5094 {
5095 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5096 if (rcStrict != VINF_SUCCESS)
5097 return rcStrict;
5098 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5099 }
5100
5101 /*
5102 * Start committing the register changes (joins with the other branch).
5103 */
5104 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5105 }
5106
5107 /* ... register committing continues. */
5108 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5109 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5110 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5111 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5112 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5113 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5114
5115 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5116 fEfl &= ~fEflToClear;
5117 IEMMISC_SET_EFL(pVCpu, fEfl);
5118
5119 if (fFlags & IEM_XCPT_FLAGS_CR2)
5120 pVCpu->cpum.GstCtx.cr2 = uCr2;
5121
5122 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5123 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5124
5125 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5126}
5127
5128
5129/**
5130 * Implements exceptions and interrupts for long mode.
5131 *
5132 * @returns VBox strict status code.
5133 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5134 * @param cbInstr The number of bytes to offset rIP by in the return
5135 * address.
5136 * @param u8Vector The interrupt / exception vector number.
5137 * @param fFlags The flags.
5138 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5139 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5140 */
5141IEM_STATIC VBOXSTRICTRC
5142iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
5143 uint8_t cbInstr,
5144 uint8_t u8Vector,
5145 uint32_t fFlags,
5146 uint16_t uErr,
5147 uint64_t uCr2)
5148{
5149 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5150
5151 /*
5152 * Read the IDT entry.
5153 */
5154 uint16_t offIdt = (uint16_t)u8Vector << 4;
5155 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5156 {
5157 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5158 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5159 }
5160 X86DESC64 Idte;
5161#ifdef _MSC_VER /* Shut up silly compiler warning. */
5162 Idte.au64[0] = 0;
5163 Idte.au64[1] = 0;
5164#endif
5165 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5166 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5167 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5168 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5169 {
5170 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5171 return rcStrict;
5172 }
5173 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5174 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5175 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
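        /* For orientation (a sketch of the layout just read, not a definition from this
           file): long mode IDT entries are 16 bytes, hence offIdt = vector * 16 and the two
           8-byte fetches above.  The first qword holds offset 15:0, the selector, the IST
           index, type/DPL/present and offset 31:16; the second qword holds offset 63:32. */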
5176
5177 /*
5178 * Check the descriptor type, DPL and such.
5179 * ASSUMES this is done in the same order as described for call-gate calls.
5180 */
5181 if (Idte.Gate.u1DescType)
5182 {
5183 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5184 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5185 }
5186 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5187 switch (Idte.Gate.u4Type)
5188 {
5189 case AMD64_SEL_TYPE_SYS_INT_GATE:
5190 fEflToClear |= X86_EFL_IF;
5191 break;
5192 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5193 break;
5194
5195 default:
5196 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5197 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5198 }
5199
5200 /* Check DPL against CPL if applicable. */
5201 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5202 {
5203 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5204 {
5205 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5206 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5207 }
5208 }
5209
5210 /* Is it there? */
5211 if (!Idte.Gate.u1Present)
5212 {
5213 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5214 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5215 }
5216
5217 /* A null CS is bad. */
5218 RTSEL NewCS = Idte.Gate.u16Sel;
5219 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5220 {
5221 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5222 return iemRaiseGeneralProtectionFault0(pVCpu);
5223 }
5224
5225 /* Fetch the descriptor for the new CS. */
5226 IEMSELDESC DescCS;
5227 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5228 if (rcStrict != VINF_SUCCESS)
5229 {
5230 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5231 return rcStrict;
5232 }
5233
5234 /* Must be a 64-bit code segment. */
5235 if (!DescCS.Long.Gen.u1DescType)
5236 {
5237 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5238 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5239 }
5240 if ( !DescCS.Long.Gen.u1Long
5241 || DescCS.Long.Gen.u1DefBig
5242 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5243 {
5244 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5245 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5246 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5247 }
5248
5249 /* Don't allow lowering the privilege level. For non-conforming CS
5250 selectors, the CS.DPL sets the privilege level the trap/interrupt
5251 handler runs at. For conforming CS selectors, the CPL remains
5252 unchanged, but the CS.DPL must be <= CPL. */
5253 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5254 * when CPU in Ring-0. Result \#GP? */
5255 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5256 {
5257 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5258 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5259 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5260 }
5261
5262
5263 /* Make sure the selector is present. */
5264 if (!DescCS.Legacy.Gen.u1Present)
5265 {
5266 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5267 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5268 }
5269
5270 /* Check that the new RIP is canonical. */
5271 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5272 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5273 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5274 if (!IEM_IS_CANONICAL(uNewRip))
5275 {
5276 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5277 return iemRaiseGeneralProtectionFault0(pVCpu);
5278 }
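        /* A minimal sketch of what the canonical check means, assuming the usual 48-bit
           implementation: bits 63:47 must all equal bit 47, i.e. the address sign extends.
           0x00007fffffffffff and 0xffff800000000000 pass, 0x0000800000000000 does not and
           would take the #GP(0) path above. */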
5279
5280 /*
5281 * If the privilege level changes or if the IST isn't zero, we need to get
5282 * a new stack from the TSS.
5283 */
5284 uint64_t uNewRsp;
5285 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5286 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5287 if ( uNewCpl != pVCpu->iem.s.uCpl
5288 || Idte.Gate.u3IST != 0)
5289 {
5290 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5291 if (rcStrict != VINF_SUCCESS)
5292 return rcStrict;
5293 }
5294 else
5295 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5296    uNewRsp &= ~(uint64_t)0xf; /* The CPU aligns RSP to a 16 byte boundary before pushing the long mode frame. */
5297
5298 /*
5299 * Calc the flag image to push.
5300 */
5301 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5302 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5303 fEfl &= ~X86_EFL_RF;
5304 else
5305 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5306
5307 /*
5308 * Start making changes.
5309 */
5310 /* Set the new CPL so that stack accesses use it. */
5311 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5312 pVCpu->iem.s.uCpl = uNewCpl;
5313
5314 /* Create the stack frame. */
5315 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
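        /* The frame built below is the standard long mode one, highest address first:
           SS, RSP, RFLAGS, CS, RIP and optionally the error code - hence the 5 or 6
           qwords in the sizeof() expression above. */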
5316 RTPTRUNION uStackFrame;
5317 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5318 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5319 if (rcStrict != VINF_SUCCESS)
5320 return rcStrict;
5321 void * const pvStackFrame = uStackFrame.pv;
5322
5323 if (fFlags & IEM_XCPT_FLAGS_ERR)
5324 *uStackFrame.pu64++ = uErr;
5325 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5326 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5327 uStackFrame.pu64[2] = fEfl;
5328 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5329 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5330 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5331 if (rcStrict != VINF_SUCCESS)
5332 return rcStrict;
5333
5334    /* Mark the CS selector 'accessed' (hope this is the correct time). */
5335    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5336 * after pushing the stack frame? (Write protect the gdt + stack to
5337 * find out.) */
5338 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5339 {
5340 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5341 if (rcStrict != VINF_SUCCESS)
5342 return rcStrict;
5343 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5344 }
5345
5346 /*
5347     * Start committing the register changes.
5348 */
5349 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5350 * hidden registers when interrupting 32-bit or 16-bit code! */
5351 if (uNewCpl != uOldCpl)
5352 {
5353 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5354 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5355 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5356 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5357 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5358 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5359 }
5360 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5361 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5362 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5363 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5364 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5365 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5366 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5367 pVCpu->cpum.GstCtx.rip = uNewRip;
5368
5369 fEfl &= ~fEflToClear;
5370 IEMMISC_SET_EFL(pVCpu, fEfl);
5371
5372 if (fFlags & IEM_XCPT_FLAGS_CR2)
5373 pVCpu->cpum.GstCtx.cr2 = uCr2;
5374
5375 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5376 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5377
5378 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5379}
5380
5381
5382/**
5383 * Implements exceptions and interrupts.
5384 *
5385 * All exceptions and interrupts go thru this function!
5386 *
5387 * @returns VBox strict status code.
5388 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5389 * @param cbInstr The number of bytes to offset rIP by in the return
5390 * address.
5391 * @param u8Vector The interrupt / exception vector number.
5392 * @param fFlags The flags.
5393 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5394 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5395 */
5396DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5397iemRaiseXcptOrInt(PVMCPUCC pVCpu,
5398 uint8_t cbInstr,
5399 uint8_t u8Vector,
5400 uint32_t fFlags,
5401 uint16_t uErr,
5402 uint64_t uCr2)
5403{
5404 /*
5405 * Get all the state that we might need here.
5406 */
5407 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5408 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5409
5410#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5411 /*
5412 * Flush prefetch buffer
5413 */
5414 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5415#endif
5416
5417 /*
5418 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5419 */
5420 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5421 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5422 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5423 | IEM_XCPT_FLAGS_BP_INSTR
5424 | IEM_XCPT_FLAGS_ICEBP_INSTR
5425 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5426 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5427 {
5428 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5429 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5430 u8Vector = X86_XCPT_GP;
5431 uErr = 0;
5432 }
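        /* Example of when this path is taken (general x86 behaviour rather than anything
           IEM specific): a guest running 16-bit code in V8086 mode with IOPL=0 executes
           'int 21h'.  Because IOPL < 3 the INT n is upgraded to #GP(0) here, which is what
           lets a classic V86 monitor intercept and reflect the software interrupt itself. */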
5433#ifdef DBGFTRACE_ENABLED
5434 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5435 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5436 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5437#endif
5438
5439 /*
5440 * Evaluate whether NMI blocking should be in effect.
5441 * Normally, NMI blocking is in effect whenever we inject an NMI.
5442 */
5443 bool fBlockNmi;
5444 if ( u8Vector == X86_XCPT_NMI
5445 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5446 fBlockNmi = true;
5447 else
5448 fBlockNmi = false;
5449
5450#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5451 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5452 {
5453 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5454 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5455 return rcStrict0;
5456
5457 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5458 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5459 {
5460 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5461 fBlockNmi = false;
5462 }
5463 }
5464#endif
5465
5466#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5467 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5468 {
5469 /*
5470 * If the event is being injected as part of VMRUN, it isn't subject to event
5471 * intercepts in the nested-guest. However, secondary exceptions that occur
5472 * during injection of any event -are- subject to exception intercepts.
5473 *
5474 * See AMD spec. 15.20 "Event Injection".
5475 */
5476 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5477 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5478 else
5479 {
5480 /*
5481 * Check and handle if the event being raised is intercepted.
5482 */
5483 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5484 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5485 return rcStrict0;
5486 }
5487 }
5488#endif
5489
5490 /*
5491 * Set NMI blocking if necessary.
5492 */
5493 if ( fBlockNmi
5494 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5495 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5496
5497 /*
5498 * Do recursion accounting.
5499 */
5500 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5501 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5502 if (pVCpu->iem.s.cXcptRecursions == 0)
5503 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5504 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5505 else
5506 {
5507 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5508 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5509 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5510
5511 if (pVCpu->iem.s.cXcptRecursions >= 4)
5512 {
5513#ifdef DEBUG_bird
5514 AssertFailed();
5515#endif
5516 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5517 }
5518
5519 /*
5520 * Evaluate the sequence of recurring events.
5521 */
5522 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5523 NULL /* pXcptRaiseInfo */);
5524 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5525 { /* likely */ }
5526 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5527 {
5528 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5529 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5530 u8Vector = X86_XCPT_DF;
5531 uErr = 0;
5532#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5533 /* VMX nested-guest #DF intercept needs to be checked here. */
5534 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5535 {
5536 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5537 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5538 return rcStrict0;
5539 }
5540#endif
5541 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5542 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5543 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5544 }
5545 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5546 {
5547 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5548 return iemInitiateCpuShutdown(pVCpu);
5549 }
5550 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5551 {
5552 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5553 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5554 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5555 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5556 return VERR_EM_GUEST_CPU_HANG;
5557 }
5558 else
5559 {
5560 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5561 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5562 return VERR_IEM_IPE_9;
5563 }
5564
5565 /*
5566             * The 'EXT' bit is set when an exception occurs during delivery of an external
5567             * event (such as an interrupt or an earlier exception)[1]. The privileged software
5568             * exception (INT1/ICEBP) also sets the EXT bit[2]. For exceptions generated by the
5569             * software interrupt (INT n), INTO and INT3 instructions, the 'EXT' bit is not set[3].
5570 *
5571 * [1] - Intel spec. 6.13 "Error Code"
5572 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5573 * [3] - Intel Instruction reference for INT n.
5574 */
5575 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5576 && (fFlags & IEM_XCPT_FLAGS_ERR)
5577 && u8Vector != X86_XCPT_PF
5578 && u8Vector != X86_XCPT_DF)
5579 {
5580 uErr |= X86_TRAP_ERR_EXTERNAL;
5581 }
5582 }
5583
5584 pVCpu->iem.s.cXcptRecursions++;
5585 pVCpu->iem.s.uCurXcpt = u8Vector;
5586 pVCpu->iem.s.fCurXcpt = fFlags;
5587 pVCpu->iem.s.uCurXcptErr = uErr;
5588 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5589
5590 /*
5591 * Extensive logging.
5592 */
5593#if defined(LOG_ENABLED) && defined(IN_RING3)
5594 if (LogIs3Enabled())
5595 {
5596 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5597 PVM pVM = pVCpu->CTX_SUFF(pVM);
5598 char szRegs[4096];
5599 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5600 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5601 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5602 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5603 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5604 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5605 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5606 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5607 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5608 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5609 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5610 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5611 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5612 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5613 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5614 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5615 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5616 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5617 " efer=%016VR{efer}\n"
5618 " pat=%016VR{pat}\n"
5619 " sf_mask=%016VR{sf_mask}\n"
5620 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5621 " lstar=%016VR{lstar}\n"
5622 " star=%016VR{star} cstar=%016VR{cstar}\n"
5623 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5624 );
5625
5626 char szInstr[256];
5627 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5628 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5629 szInstr, sizeof(szInstr), NULL);
5630 Log3(("%s%s\n", szRegs, szInstr));
5631 }
5632#endif /* LOG_ENABLED */
5633
5634 /*
5635 * Call the mode specific worker function.
5636 */
5637 VBOXSTRICTRC rcStrict;
5638 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5639 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5640 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5641 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5642 else
5643 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5644
5645 /* Flush the prefetch buffer. */
5646#ifdef IEM_WITH_CODE_TLB
5647 pVCpu->iem.s.pbInstrBuf = NULL;
5648#else
5649 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5650#endif
5651
5652 /*
5653 * Unwind.
5654 */
5655 pVCpu->iem.s.cXcptRecursions--;
5656 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5657 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5658 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5659 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5660 pVCpu->iem.s.cXcptRecursions + 1));
5661 return rcStrict;
5662}
5663
5664#ifdef IEM_WITH_SETJMP
5665/**
5666 * See iemRaiseXcptOrInt. Will not return.
5667 */
5668IEM_STATIC DECL_NO_RETURN(void)
5669iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
5670 uint8_t cbInstr,
5671 uint8_t u8Vector,
5672 uint32_t fFlags,
5673 uint16_t uErr,
5674 uint64_t uCr2)
5675{
5676 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5677 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5678}
5679#endif
5680
5681
5682/** \#DE - 00. */
5683DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPUCC pVCpu)
5684{
5685 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5686}
5687
5688
5689/** \#DB - 01.
5690 * @note This automatically clears DR7.GD. */
5691DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPUCC pVCpu)
5692{
5693 /** @todo set/clear RF. */
5694 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5695 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5696}
5697
5698
5699/** \#BR - 05. */
5700DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu)
5701{
5702 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5703}
5704
5705
5706/** \#UD - 06. */
5707DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPUCC pVCpu)
5708{
5709 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5710}
5711
5712
5713/** \#NM - 07. */
5714DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu)
5715{
5716 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5717}
5718
5719
5720/** \#TS(err) - 0a. */
5721DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5722{
5723 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5724}
5725
5726
5727/** \#TS(tr) - 0a. */
5728DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu)
5729{
5730 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5731 pVCpu->cpum.GstCtx.tr.Sel, 0);
5732}
5733
5734
5735/** \#TS(0) - 0a. */
5736DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu)
5737{
5738 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5739 0, 0);
5740}
5741
5742
5743/** \#TS(sel) - 0a. */
5744DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5745{
5746 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5747 uSel & X86_SEL_MASK_OFF_RPL, 0);
5748}
5749
5750
5751/** \#NP(err) - 0b. */
5752DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5753{
5754 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5755}
5756
5757
5758/** \#NP(sel) - 0b. */
5759DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5760{
5761 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5762 uSel & ~X86_SEL_RPL, 0);
5763}
5764
5765
5766/** \#SS(seg) - 0c. */
5767DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5768{
5769 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5770 uSel & ~X86_SEL_RPL, 0);
5771}
5772
5773
5774/** \#SS(err) - 0c. */
5775DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5776{
5777 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5778}
5779
5780
5781/** \#GP(n) - 0d. */
5782DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr)
5783{
5784 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5785}
5786
5787
5788/** \#GP(0) - 0d. */
5789DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu)
5790{
5791 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5792}
5793
5794#ifdef IEM_WITH_SETJMP
5795/** \#GP(0) - 0d. */
5796DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu)
5797{
5798 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5799}
5800#endif
5801
5802
5803/** \#GP(sel) - 0d. */
5804DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5805{
5806 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5807 Sel & ~X86_SEL_RPL, 0);
5808}
5809
5810
5811/** \#GP(0) - 0d. */
5812DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPUCC pVCpu)
5813{
5814 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5815}
5816
5817
5818/** \#GP(sel) - 0d. */
5819DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5820{
5821 NOREF(iSegReg); NOREF(fAccess);
5822 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5823 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5824}
5825
5826#ifdef IEM_WITH_SETJMP
5827/** \#GP(sel) - 0d, longjmp. */
5828DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5829{
5830 NOREF(iSegReg); NOREF(fAccess);
5831 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5832 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5833}
5834#endif
5835
5836/** \#GP(sel) - 0d. */
5837DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5838{
5839 NOREF(Sel);
5840 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5841}
5842
5843#ifdef IEM_WITH_SETJMP
5844/** \#GP(sel) - 0d, longjmp. */
5845DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel)
5846{
5847 NOREF(Sel);
5848 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5849}
5850#endif
5851
5852
5853/** \#GP(sel) - 0d. */
5854DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5855{
5856 NOREF(iSegReg); NOREF(fAccess);
5857 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5858}
5859
5860#ifdef IEM_WITH_SETJMP
5861/** \#GP(sel) - 0d, longjmp. */
5862DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg,
5863 uint32_t fAccess)
5864{
5865 NOREF(iSegReg); NOREF(fAccess);
5866 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5867}
5868#endif
5869
5870
5871/** \#PF(n) - 0e. */
5872DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5873{
5874 uint16_t uErr;
5875 switch (rc)
5876 {
5877 case VERR_PAGE_NOT_PRESENT:
5878 case VERR_PAGE_TABLE_NOT_PRESENT:
5879 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5880 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5881 uErr = 0;
5882 break;
5883
5884 default:
5885 AssertMsgFailed(("%Rrc\n", rc));
5886 RT_FALL_THRU();
5887 case VERR_ACCESS_DENIED:
5888 uErr = X86_TRAP_PF_P;
5889 break;
5890
5891 /** @todo reserved */
5892 }
5893
5894 if (pVCpu->iem.s.uCpl == 3)
5895 uErr |= X86_TRAP_PF_US;
5896
5897 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5898 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5899 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5900 uErr |= X86_TRAP_PF_ID;
5901
5902#if 0 /* This is so much non-sense, really. Why was it done like that? */
5903 /* Note! RW access callers reporting a WRITE protection fault, will clear
5904 the READ flag before calling. So, read-modify-write accesses (RW)
5905 can safely be reported as READ faults. */
5906 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5907 uErr |= X86_TRAP_PF_RW;
5908#else
5909 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5910 {
5911 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
5912 /// (regardless of outcome of the comparison in the latter case).
5913 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
5914 uErr |= X86_TRAP_PF_RW;
5915 }
5916#endif
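        /* A couple of worked examples of the error code assembled above (standard #PF
           error code bits, nothing specific to this function): a ring-3 write to a present
           read-only page yields P | RW | US = 0x7, while a ring-0 instruction fetch from a
           no-execute page (with PAE + NXE active) yields P | ID = 0x11. */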
5917
5918 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5919 uErr, GCPtrWhere);
5920}
5921
5922#ifdef IEM_WITH_SETJMP
5923/** \#PF(n) - 0e, longjmp. */
5924IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5925{
5926 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5927}
5928#endif
5929
5930
5931/** \#MF(0) - 10. */
5932DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPUCC pVCpu)
5933{
5934 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5935}
5936
5937
5938/** \#AC(0) - 11. */
5939DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
5940{
5941 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5942}
5943
5944
5945/**
5946 * Macro for calling iemCImplRaiseDivideError().
5947 *
5948 * This enables us to add/remove arguments and force different levels of
5949 * inlining as we wish.
5950 *
5951 * @return Strict VBox status code.
5952 */
5953#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5954IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5955{
5956 NOREF(cbInstr);
5957 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5958}
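
/* A hedged usage sketch (hypothetical fragment, not code from this file): in the opcode
 * decoder files the IEMOP_RAISE_XXX macros typically form the entire body of a decoder
 * branch, e.g. a decoder that must reject a zero immediate (AAM style) might do
 *
 *      if (bImm == 0)
 *          return IEMOP_RAISE_DIVIDE_ERROR();
 *
 * i.e. the macro expands to an IEM_MC_DEFER_TO_CIMPL_0 invocation which hands the actual
 * exception raising to the C implementation above. */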
5959
5960
5961/**
5962 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5963 *
5964 * This enables us to add/remove arguments and force different levels of
5965 * inlining as we wish.
5966 *
5967 * @return Strict VBox status code.
5968 */
5969#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5970IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5971{
5972 NOREF(cbInstr);
5973 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5974}
5975
5976
5977/**
5978 * Macro for calling iemCImplRaiseInvalidOpcode().
5979 *
5980 * This enables us to add/remove arguments and force different levels of
5981 * inlining as we wish.
5982 *
5983 * @return Strict VBox status code.
5984 */
5985#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
5986IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
5987{
5988 NOREF(cbInstr);
5989 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5990}
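/*
 * Illustrative decoder usage of the IEMOP_RAISE_xxx macros (a sketch only, not
 * copied from the opcode tables; the function name is made up):
 *
 *     FNIEMOP_DEF(iemOp_SomeReservedEncoding)
 *     {
 *         return IEMOP_RAISE_INVALID_OPCODE();
 *     }
 *
 * The macro defers to the matching iemCImplRaiseXxx worker above through
 * IEM_MC_DEFER_TO_CIMPL_0, so the decoder body stays a one-liner.
 */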
5991
5992
5993/** @} */
5994
5995
5996/*
5997 *
5998 * Helper routines.
5999 * Helper routines.
6000 * Helper routines.
6001 *
6002 */
6003
6004/**
6005 * Recalculates the effective operand size.
6006 *
6007 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6008 */
6009IEM_STATIC void iemRecalEffOpSize(PVMCPUCC pVCpu)
6010{
6011 switch (pVCpu->iem.s.enmCpuMode)
6012 {
6013 case IEMMODE_16BIT:
6014 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6015 break;
6016 case IEMMODE_32BIT:
6017 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6018 break;
6019 case IEMMODE_64BIT:
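            /* REX.W wins over the 0x66 operand size prefix in 64-bit mode: REX.W
               forces 64-bit, 0x66 alone selects 16-bit, and with neither prefix
               the default operand size applies. */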
6020 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6021 {
6022 case 0:
6023 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6024 break;
6025 case IEM_OP_PRF_SIZE_OP:
6026 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6027 break;
6028 case IEM_OP_PRF_SIZE_REX_W:
6029 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6030 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6031 break;
6032 }
6033 break;
6034 default:
6035 AssertFailed();
6036 }
6037}
6038
6039
6040/**
6041 * Sets the default operand size to 64-bit and recalculates the effective
6042 * operand size.
6043 *
6044 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6045 */
6046IEM_STATIC void iemRecalEffOpSize64Default(PVMCPUCC pVCpu)
6047{
6048 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6049 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
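    /* With a 64-bit default, only a 0x66 prefix without REX.W drops the effective
       operand size to 16-bit; REX.W (alone or combined with 0x66) keeps 64-bit. */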
6050 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6051 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6052 else
6053 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6054}
6055
6056
6057/*
6058 *
6059 * Common opcode decoders.
6060 * Common opcode decoders.
6061 * Common opcode decoders.
6062 *
6063 */
6064//#include <iprt/mem.h>
6065
6066/**
6067 * Used to add extra details about a stub case.
6068 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6069 */
6070IEM_STATIC void iemOpStubMsg2(PVMCPUCC pVCpu)
6071{
6072#if defined(LOG_ENABLED) && defined(IN_RING3)
6073 PVM pVM = pVCpu->CTX_SUFF(pVM);
6074 char szRegs[4096];
6075 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6076 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6077 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6078 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6079 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6080 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6081 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6082 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6083 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6084 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6085 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6086 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6087 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6088 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6089 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6090 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6091 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6092 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6093 " efer=%016VR{efer}\n"
6094 " pat=%016VR{pat}\n"
6095 " sf_mask=%016VR{sf_mask}\n"
6096 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6097 " lstar=%016VR{lstar}\n"
6098 " star=%016VR{star} cstar=%016VR{cstar}\n"
6099 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6100 );
6101
6102 char szInstr[256];
6103 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6104 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6105 szInstr, sizeof(szInstr), NULL);
6106
6107 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6108#else
6109    RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6110#endif
6111}
6112
6113/**
6114 * Complains about a stub.
6115 *
6116 * Two versions of this macro are provided: one for daily use and one for use
6117 * when working on IEM.
6118 */
6119#if 0
6120# define IEMOP_BITCH_ABOUT_STUB() \
6121 do { \
6122 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6123 iemOpStubMsg2(pVCpu); \
6124 RTAssertPanic(); \
6125 } while (0)
6126#else
6127# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6128#endif
6129
6130/** Stubs an opcode. */
6131#define FNIEMOP_STUB(a_Name) \
6132 FNIEMOP_DEF(a_Name) \
6133 { \
6134 RT_NOREF_PV(pVCpu); \
6135 IEMOP_BITCH_ABOUT_STUB(); \
6136 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6137 } \
6138 typedef int ignore_semicolon
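/* Note: the trailing 'typedef int ignore_semicolon' in these stub macros only
   exists so that the macro invocation can be followed by a semicolon without
   leaving a stray ';' at file scope. */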
6139
6140/** Stubs an opcode. */
6141#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6142 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6143 { \
6144 RT_NOREF_PV(pVCpu); \
6145 RT_NOREF_PV(a_Name0); \
6146 IEMOP_BITCH_ABOUT_STUB(); \
6147 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6148 } \
6149 typedef int ignore_semicolon
6150
6151/** Stubs an opcode which currently should raise \#UD. */
6152#define FNIEMOP_UD_STUB(a_Name) \
6153 FNIEMOP_DEF(a_Name) \
6154 { \
6155 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6156 return IEMOP_RAISE_INVALID_OPCODE(); \
6157 } \
6158 typedef int ignore_semicolon
6159
6160/** Stubs an opcode which currently should raise \#UD. */
6161#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6162 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6163 { \
6164 RT_NOREF_PV(pVCpu); \
6165 RT_NOREF_PV(a_Name0); \
6166 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6167 return IEMOP_RAISE_INVALID_OPCODE(); \
6168 } \
6169 typedef int ignore_semicolon
6170
6171
6172
6173/** @name Register Access.
6174 * @{
6175 */
6176
6177/**
6178 * Gets a reference (pointer) to the specified hidden segment register.
6179 *
6180 * @returns Hidden register reference.
6181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6182 * @param iSegReg The segment register.
6183 */
6184IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg)
6185{
6186 Assert(iSegReg < X86_SREG_COUNT);
6187 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6188 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6189
6190 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6191 return pSReg;
6192}
6193
6194
6195/**
6196 * Ensures that the given hidden segment register is up to date.
6197 *
6198 * @returns Hidden register reference.
6199 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6200 * @param pSReg The segment register.
6201 */
6202IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
6203{
6204 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6205 NOREF(pVCpu);
6206 return pSReg;
6207}
6208
6209
6210/**
6211 * Gets a reference (pointer) to the specified segment register (the selector
6212 * value).
6213 *
6214 * @returns Pointer to the selector variable.
6215 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6216 * @param iSegReg The segment register.
6217 */
6218DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg)
6219{
6220 Assert(iSegReg < X86_SREG_COUNT);
6221 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6222 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6223}
6224
6225
6226/**
6227 * Fetches the selector value of a segment register.
6228 *
6229 * @returns The selector value.
6230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6231 * @param iSegReg The segment register.
6232 */
6233DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg)
6234{
6235 Assert(iSegReg < X86_SREG_COUNT);
6236 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6237 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6238}
6239
6240
6241/**
6242 * Fetches the base address value of a segment register.
6243 *
6244 * @returns The segment base address.
6245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6246 * @param iSegReg The segment register.
6247 */
6248DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6249{
6250 Assert(iSegReg < X86_SREG_COUNT);
6251 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6252 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6253}
6254
6255
6256/**
6257 * Gets a reference (pointer) to the specified general purpose register.
6258 *
6259 * @returns Register reference.
6260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6261 * @param iReg The general purpose register.
6262 */
6263DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg)
6264{
6265 Assert(iReg < 16);
6266 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6267}
6268
6269
6270/**
6271 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6272 *
6273 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6274 *
6275 * @returns Register reference.
6276 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6277 * @param iReg The register.
6278 */
6279DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg)
6280{
6281 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6282 {
6283 Assert(iReg < 16);
6284 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6285 }
6286 /* high 8-bit register. */
6287 Assert(iReg < 8);
6288 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6289}
6290
6291
6292/**
6293 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6294 *
6295 * @returns Register reference.
6296 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6297 * @param iReg The register.
6298 */
6299DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg)
6300{
6301 Assert(iReg < 16);
6302 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6303}
6304
6305
6306/**
6307 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6308 *
6309 * @returns Register reference.
6310 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6311 * @param iReg The register.
6312 */
6313DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg)
6314{
6315 Assert(iReg < 16);
6316 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6317}
6318
6319
6320/**
6321 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6322 *
6323 * @returns Register reference.
6324 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6325 * @param iReg The register.
6326 */
6327DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg)
6328{
6329    Assert(iReg < 16);
6330 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6331}
6332
6333
6334/**
6335 * Gets a reference (pointer) to the specified segment register's base address.
6336 *
6337 * @returns Segment register base address reference.
6338 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6339 * @param iSegReg The segment selector.
6340 */
6341DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6342{
6343 Assert(iSegReg < X86_SREG_COUNT);
6344 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6345 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6346}
6347
6348
6349/**
6350 * Fetches the value of an 8-bit general purpose register.
6351 *
6352 * @returns The register value.
6353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6354 * @param iReg The register.
6355 */
6356DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg)
6357{
6358 return *iemGRegRefU8(pVCpu, iReg);
6359}
6360
6361
6362/**
6363 * Fetches the value of a 16-bit general purpose register.
6364 *
6365 * @returns The register value.
6366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6367 * @param iReg The register.
6368 */
6369DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg)
6370{
6371 Assert(iReg < 16);
6372 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6373}
6374
6375
6376/**
6377 * Fetches the value of a 32-bit general purpose register.
6378 *
6379 * @returns The register value.
6380 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6381 * @param iReg The register.
6382 */
6383DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg)
6384{
6385 Assert(iReg < 16);
6386 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6387}
6388
6389
6390/**
6391 * Fetches the value of a 64-bit general purpose register.
6392 *
6393 * @returns The register value.
6394 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6395 * @param iReg The register.
6396 */
6397DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg)
6398{
6399 Assert(iReg < 16);
6400 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6401}
6402
6403
6404/**
6405 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6406 *
6407 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6408 * segment limit.
6409 *
6410 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6411 * @param offNextInstr The offset of the next instruction.
6412 */
6413IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr)
6414{
6415 switch (pVCpu->iem.s.enmEffOpSize)
6416 {
6417 case IEMMODE_16BIT:
6418 {
6419 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6420 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6421 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6422 return iemRaiseGeneralProtectionFault0(pVCpu);
6423 pVCpu->cpum.GstCtx.rip = uNewIp;
6424 break;
6425 }
6426
6427 case IEMMODE_32BIT:
6428 {
6429 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6430 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6431
6432 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6433 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6434 return iemRaiseGeneralProtectionFault0(pVCpu);
6435 pVCpu->cpum.GstCtx.rip = uNewEip;
6436 break;
6437 }
6438
6439 case IEMMODE_64BIT:
6440 {
6441 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6442
6443 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6444 if (!IEM_IS_CANONICAL(uNewRip))
6445 return iemRaiseGeneralProtectionFault0(pVCpu);
6446 pVCpu->cpum.GstCtx.rip = uNewRip;
6447 break;
6448 }
6449
6450 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6451 }
6452
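    /* As with any other successfully completed instruction, clear EFLAGS.RF once
       the branch has been taken so instruction breakpoints at the destination are
       not suppressed. */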
6453 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6454
6455#ifndef IEM_WITH_CODE_TLB
6456 /* Flush the prefetch buffer. */
6457 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6458#endif
6459
6460 return VINF_SUCCESS;
6461}
6462
6463
6464/**
6465 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6466 *
6467 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6468 * segment limit.
6469 *
6470 * @returns Strict VBox status code.
6471 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6472 * @param offNextInstr The offset of the next instruction.
6473 */
6474IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr)
6475{
6476 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6477
6478 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6479 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6480 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6481 return iemRaiseGeneralProtectionFault0(pVCpu);
6482 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6483 pVCpu->cpum.GstCtx.rip = uNewIp;
6484 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6485
6486#ifndef IEM_WITH_CODE_TLB
6487 /* Flush the prefetch buffer. */
6488 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6489#endif
6490
6491 return VINF_SUCCESS;
6492}
6493
6494
6495/**
6496 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6497 *
6498 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6499 * segment limit.
6500 *
6501 * @returns Strict VBox status code.
6502 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6503 * @param offNextInstr The offset of the next instruction.
6504 */
6505IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr)
6506{
6507 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6508
6509 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6510 {
6511 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6512
6513 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6514 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6515 return iemRaiseGeneralProtectionFault0(pVCpu);
6516 pVCpu->cpum.GstCtx.rip = uNewEip;
6517 }
6518 else
6519 {
6520 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6521
6522 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6523 if (!IEM_IS_CANONICAL(uNewRip))
6524 return iemRaiseGeneralProtectionFault0(pVCpu);
6525 pVCpu->cpum.GstCtx.rip = uNewRip;
6526 }
6527 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6528
6529#ifndef IEM_WITH_CODE_TLB
6530 /* Flush the prefetch buffer. */
6531 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6532#endif
6533
6534 return VINF_SUCCESS;
6535}
6536
6537
6538/**
6539 * Performs a near jump to the specified address.
6540 *
6541 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6542 * segment limit.
6543 *
6544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6545 * @param uNewRip The new RIP value.
6546 */
6547IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip)
6548{
6549 switch (pVCpu->iem.s.enmEffOpSize)
6550 {
6551 case IEMMODE_16BIT:
6552 {
6553 Assert(uNewRip <= UINT16_MAX);
6554 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6555 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6556 return iemRaiseGeneralProtectionFault0(pVCpu);
6557 /** @todo Test 16-bit jump in 64-bit mode. */
6558 pVCpu->cpum.GstCtx.rip = uNewRip;
6559 break;
6560 }
6561
6562 case IEMMODE_32BIT:
6563 {
6564 Assert(uNewRip <= UINT32_MAX);
6565 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6566 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6567
6568 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6569 return iemRaiseGeneralProtectionFault0(pVCpu);
6570 pVCpu->cpum.GstCtx.rip = uNewRip;
6571 break;
6572 }
6573
6574 case IEMMODE_64BIT:
6575 {
6576 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6577
6578 if (!IEM_IS_CANONICAL(uNewRip))
6579 return iemRaiseGeneralProtectionFault0(pVCpu);
6580 pVCpu->cpum.GstCtx.rip = uNewRip;
6581 break;
6582 }
6583
6584 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6585 }
6586
6587 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6588
6589#ifndef IEM_WITH_CODE_TLB
6590 /* Flush the prefetch buffer. */
6591 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6592#endif
6593
6594 return VINF_SUCCESS;
6595}
6596
6597
6598/**
6599 * Gets the address of the top of the stack.
6600 *
6601 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6602 */
6603DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6604{
6605 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6606 return pVCpu->cpum.GstCtx.rsp;
6607 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6608 return pVCpu->cpum.GstCtx.esp;
6609 return pVCpu->cpum.GstCtx.sp;
6610}
6611
6612
6613/**
6614 * Updates the RIP/EIP/IP to point to the next instruction.
6615 *
6616 * This function leaves the EFLAGS.RF flag alone.
6617 *
6618 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6619 * @param cbInstr The number of bytes to add.
6620 */
6621IEM_STATIC void iemRegAddToRipKeepRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6622{
6623 switch (pVCpu->iem.s.enmCpuMode)
6624 {
6625 case IEMMODE_16BIT:
6626 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6627 pVCpu->cpum.GstCtx.eip += cbInstr;
6628 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6629 break;
6630
6631 case IEMMODE_32BIT:
6632 pVCpu->cpum.GstCtx.eip += cbInstr;
6633 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6634 break;
6635
6636 case IEMMODE_64BIT:
6637 pVCpu->cpum.GstCtx.rip += cbInstr;
6638 break;
6639 default: AssertFailed();
6640 }
6641}
6642
6643
6644#if 0
6645/**
6646 * Updates the RIP/EIP/IP to point to the next instruction.
6647 *
6648 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6649 */
6650IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPUCC pVCpu)
6651{
6652 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6653}
6654#endif
6655
6656
6657
6658/**
6659 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6660 *
6661 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6662 * @param cbInstr The number of bytes to add.
6663 */
6664IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6665{
6666 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6667
6668 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6669#if ARCH_BITS >= 64
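    /* Table-driven advance: the new value is masked to 32 bits in 16/32-bit mode
       and kept in full in 64-bit mode (the CPU mode doubles as table index, see
       the AssertCompile above). */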
6670 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6671 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6672 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6673#else
6674 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6675 pVCpu->cpum.GstCtx.rip += cbInstr;
6676 else
6677 pVCpu->cpum.GstCtx.eip += cbInstr;
6678#endif
6679}
6680
6681
6682/**
6683 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6684 *
6685 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6686 */
6687IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPUCC pVCpu)
6688{
6689 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6690}
6691
6692
6693/**
6694 * Adds to the stack pointer.
6695 *
6696 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6697 * @param cbToAdd The number of bytes to add (8-bit!).
6698 */
6699DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd)
6700{
6701 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6702 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6703 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6704 pVCpu->cpum.GstCtx.esp += cbToAdd;
6705 else
6706 pVCpu->cpum.GstCtx.sp += cbToAdd;
6707}
6708
6709
6710/**
6711 * Subtracts from the stack pointer.
6712 *
6713 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6714 * @param cbToSub The number of bytes to subtract (8-bit!).
6715 */
6716DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub)
6717{
6718 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6719 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6720 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6721 pVCpu->cpum.GstCtx.esp -= cbToSub;
6722 else
6723 pVCpu->cpum.GstCtx.sp -= cbToSub;
6724}
6725
6726
6727/**
6728 * Adds to the temporary stack pointer.
6729 *
6730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6731 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6732 * @param cbToAdd The number of bytes to add (16-bit).
6733 */
6734DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6735{
6736 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6737 pTmpRsp->u += cbToAdd;
6738 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6739 pTmpRsp->DWords.dw0 += cbToAdd;
6740 else
6741 pTmpRsp->Words.w0 += cbToAdd;
6742}
6743
6744
6745/**
6746 * Subtracts from the temporary stack pointer.
6747 *
6748 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6749 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6750 * @param cbToSub The number of bytes to subtract.
6751 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6752 * expecting that.
6753 */
6754DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6755{
6756 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6757 pTmpRsp->u -= cbToSub;
6758 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6759 pTmpRsp->DWords.dw0 -= cbToSub;
6760 else
6761 pTmpRsp->Words.w0 -= cbToSub;
6762}
6763
6764
6765/**
6766 * Calculates the effective stack address for a push of the specified size as
6767 * well as the new RSP value (upper bits may be masked).
6768 *
6769 * @returns The effective stack address for the push.
6770 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6771 * @param   cbItem              The size of the stack item to push.
6772 * @param puNewRsp Where to return the new RSP value.
6773 */
6774DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6775{
6776 RTUINT64U uTmpRsp;
6777 RTGCPTR GCPtrTop;
6778 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6779
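    /* Only the part of RSP that is actually in use is decremented and allowed to
       wrap: the full 64 bits in long mode, ESP for a big (32-bit) SS, else SP. */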
6780 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6781 GCPtrTop = uTmpRsp.u -= cbItem;
6782 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6783 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6784 else
6785 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6786 *puNewRsp = uTmpRsp.u;
6787 return GCPtrTop;
6788}
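/*
 * Typical caller pattern (a simplified sketch of the stack push helpers found
 * elsewhere in IEM): the new RSP is only committed after the memory write has
 * succeeded, so a faulting push leaves RSP untouched.
 *
 *     uint64_t     uNewRsp;
 *     RTGCPTR      GCPtrTop  = iemRegGetRspForPush(pVCpu, sizeof(uint64_t), &uNewRsp);
 *     VBOXSTRICTRC rcStrict  = iemMemStoreDataU64(pVCpu, X86_SREG_SS, GCPtrTop, u64Value);
 *     if (rcStrict == VINF_SUCCESS)
 *         pVCpu->cpum.GstCtx.rsp = uNewRsp;
 */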
6789
6790
6791/**
6792 * Gets the current stack pointer and calculates the value after a pop of the
6793 * specified size.
6794 *
6795 * @returns Current stack pointer.
6796 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6797 * @param cbItem The size of the stack item to pop.
6798 * @param puNewRsp Where to return the new RSP value.
6799 */
6800DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6801{
6802 RTUINT64U uTmpRsp;
6803 RTGCPTR GCPtrTop;
6804 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6805
6806 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6807 {
6808 GCPtrTop = uTmpRsp.u;
6809 uTmpRsp.u += cbItem;
6810 }
6811 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6812 {
6813 GCPtrTop = uTmpRsp.DWords.dw0;
6814 uTmpRsp.DWords.dw0 += cbItem;
6815 }
6816 else
6817 {
6818 GCPtrTop = uTmpRsp.Words.w0;
6819 uTmpRsp.Words.w0 += cbItem;
6820 }
6821 *puNewRsp = uTmpRsp.u;
6822 return GCPtrTop;
6823}
6824
6825
6826/**
6827 * Calculates the effective stack address for a push of the specified size as
6828 * well as the new temporary RSP value (upper bits may be masked).
6829 *
6830 * @returns The effective stack address for the push.
6831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6832 * @param pTmpRsp The temporary stack pointer. This is updated.
6833 * @param   cbItem              The size of the stack item to push.
6834 */
6835DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6836{
6837 RTGCPTR GCPtrTop;
6838
6839 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6840 GCPtrTop = pTmpRsp->u -= cbItem;
6841 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6842 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6843 else
6844 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6845 return GCPtrTop;
6846}
6847
6848
6849/**
6850 * Gets the effective stack address for a pop of the specified size and
6851 * calculates and updates the temporary RSP.
6852 *
6853 * @returns Current stack pointer.
6854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6855 * @param pTmpRsp The temporary stack pointer. This is updated.
6856 * @param cbItem The size of the stack item to pop.
6857 */
6858DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6859{
6860 RTGCPTR GCPtrTop;
6861 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6862 {
6863 GCPtrTop = pTmpRsp->u;
6864 pTmpRsp->u += cbItem;
6865 }
6866 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6867 {
6868 GCPtrTop = pTmpRsp->DWords.dw0;
6869 pTmpRsp->DWords.dw0 += cbItem;
6870 }
6871 else
6872 {
6873 GCPtrTop = pTmpRsp->Words.w0;
6874 pTmpRsp->Words.w0 += cbItem;
6875 }
6876 return GCPtrTop;
6877}
6878
6879/** @} */
6880
6881
6882/** @name FPU access and helpers.
6883 *
6884 * @{
6885 */
6886
6887
6888/**
6889 * Hook for preparing to use the host FPU.
6890 *
6891 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6892 *
6893 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6894 */
6895DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu)
6896{
6897#ifdef IN_RING3
6898 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6899#else
6900 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6901#endif
6902 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6903}
6904
6905
6906/**
6907 * Hook for preparing to use the host FPU for SSE.
6908 *
6909 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6910 *
6911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6912 */
6913DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu)
6914{
6915 iemFpuPrepareUsage(pVCpu);
6916}
6917
6918
6919/**
6920 * Hook for preparing to use the host FPU for AVX.
6921 *
6922 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6923 *
6924 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6925 */
6926DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu)
6927{
6928 iemFpuPrepareUsage(pVCpu);
6929}
6930
6931
6932/**
6933 * Hook for actualizing the guest FPU state before the interpreter reads it.
6934 *
6935 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6936 *
6937 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6938 */
6939DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu)
6940{
6941#ifdef IN_RING3
6942 NOREF(pVCpu);
6943#else
6944 CPUMRZFpuStateActualizeForRead(pVCpu);
6945#endif
6946 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6947}
6948
6949
6950/**
6951 * Hook for actualizing the guest FPU state before the interpreter changes it.
6952 *
6953 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6954 *
6955 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6956 */
6957DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu)
6958{
6959#ifdef IN_RING3
6960 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6961#else
6962 CPUMRZFpuStateActualizeForChange(pVCpu);
6963#endif
6964 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6965}
6966
6967
6968/**
6969 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6970 * only.
6971 *
6972 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6973 *
6974 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6975 */
6976DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu)
6977{
6978#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6979 NOREF(pVCpu);
6980#else
6981 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6982#endif
6983 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6984}
6985
6986
6987/**
6988 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
6989 * read+write.
6990 *
6991 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6992 *
6993 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6994 */
6995DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu)
6996{
6997#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6998 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6999#else
7000 CPUMRZFpuStateActualizeForChange(pVCpu);
7001#endif
7002 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7003
7004 /* Make sure any changes are loaded the next time around. */
7005 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
7006}
7007
7008
7009/**
7010 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7011 * only.
7012 *
7013 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7014 *
7015 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7016 */
7017DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu)
7018{
7019#ifdef IN_RING3
7020 NOREF(pVCpu);
7021#else
7022 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7023#endif
7024 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7025}
7026
7027
7028/**
7029 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7030 * read+write.
7031 *
7032 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7033 *
7034 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7035 */
7036DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu)
7037{
7038#ifdef IN_RING3
7039 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7040#else
7041 CPUMRZFpuStateActualizeForChange(pVCpu);
7042#endif
7043 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7044
7045 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
7046 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
7047}
7048
7049
7050/**
7051 * Stores a QNaN value into a FPU register.
7052 *
7053 * @param pReg Pointer to the register.
7054 */
7055DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7056{
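    /* This is the x87 "QNaN floating-point indefinite" encoding: sign=1,
       exponent=0x7fff, integer bit and the top fraction bit set, i.e.
       0xffff'c0000000'00000000. */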
7057 pReg->au32[0] = UINT32_C(0x00000000);
7058 pReg->au32[1] = UINT32_C(0xc0000000);
7059 pReg->au16[4] = UINT16_C(0xffff);
7060}
7061
7062
7063/**
7064 * Updates the FOP, FPU.CS and FPUIP registers.
7065 *
7066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7067 * @param pFpuCtx The FPU context.
7068 */
7069DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx)
7070{
7071 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7072 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7073    /** @todo x87.CS and FPUIP need to be kept separately. */
7074 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7075 {
7076        /** @todo Testcase: we make assumptions here about how FPUIP and FPUDP are
7077         *        handled in real mode, based on the fnsave and fnstenv images. */
7078 pFpuCtx->CS = 0;
7079 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7080 }
7081 else
7082 {
7083 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7084 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7085 }
7086}
7087
7088
7089/**
7090 * Updates the x87.DS and FPUDP registers.
7091 *
7092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7093 * @param pFpuCtx The FPU context.
7094 * @param iEffSeg The effective segment register.
7095 * @param GCPtrEff The effective address relative to @a iEffSeg.
7096 */
7097DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7098{
7099 RTSEL sel;
7100 switch (iEffSeg)
7101 {
7102 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7103 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7104 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7105 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7106 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7107 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7108 default:
7109 AssertMsgFailed(("%d\n", iEffSeg));
7110 sel = pVCpu->cpum.GstCtx.ds.Sel;
7111 }
7112    /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7113 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7114 {
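        /* Real and V86 mode: FPUDP holds the linear address (selector * 16 plus
           offset) and the selector field is left zero. */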
7115 pFpuCtx->DS = 0;
7116 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7117 }
7118 else
7119 {
7120 pFpuCtx->DS = sel;
7121 pFpuCtx->FPUDP = GCPtrEff;
7122 }
7123}
7124
7125
7126/**
7127 * Rotates the stack registers in the push direction.
7128 *
7129 * @param pFpuCtx The FPU context.
7130 * @remarks This is a complete waste of time, but fxsave stores the registers in
7131 * stack order.
7132 */
7133DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7134{
7135 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7136 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7137 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7138 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7139 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7140 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7141 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7142 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7143 pFpuCtx->aRegs[0].r80 = r80Tmp;
7144}
7145
7146
7147/**
7148 * Rotates the stack registers in the pop direction.
7149 *
7150 * @param pFpuCtx The FPU context.
7151 * @remarks This is a complete waste of time, but fxsave stores the registers in
7152 * stack order.
7153 */
7154DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7155{
7156 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7157 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7158 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7159 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7160 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7161 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7162 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7163 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7164 pFpuCtx->aRegs[7].r80 = r80Tmp;
7165}
7166
7167
7168/**
7169 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7170 * exception prevents it.
7171 *
7172 * @param pResult The FPU operation result to push.
7173 * @param pFpuCtx The FPU context.
7174 */
7175IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7176{
7177 /* Update FSW and bail if there are pending exceptions afterwards. */
7178 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7179 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7180 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7181 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7182 {
7183 pFpuCtx->FSW = fFsw;
7184 return;
7185 }
7186
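    /* TOP is a 3-bit field, so adding 7 modulo 8 decrements it by one; iNewTop is
       the physical register slot the pushed value will land in. */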
7187 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7188 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7189 {
7190 /* All is fine, push the actual value. */
7191 pFpuCtx->FTW |= RT_BIT(iNewTop);
7192 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7193 }
7194 else if (pFpuCtx->FCW & X86_FCW_IM)
7195 {
7196 /* Masked stack overflow, push QNaN. */
7197 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7198 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7199 }
7200 else
7201 {
7202 /* Raise stack overflow, don't push anything. */
7203 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7204 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7205 return;
7206 }
7207
7208 fFsw &= ~X86_FSW_TOP_MASK;
7209 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7210 pFpuCtx->FSW = fFsw;
7211
7212 iemFpuRotateStackPush(pFpuCtx);
7213}
7214
7215
7216/**
7217 * Stores a result in a FPU register and updates the FSW and FTW.
7218 *
7219 * @param pFpuCtx The FPU context.
7220 * @param pResult The result to store.
7221 * @param iStReg Which FPU register to store it in.
7222 */
7223IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7224{
7225 Assert(iStReg < 8);
7226 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7227 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7228 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7229 pFpuCtx->FTW |= RT_BIT(iReg);
7230 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7231}
7232
7233
7234/**
7235 * Only updates the FPU status word (FSW) with the result of the current
7236 * instruction.
7237 *
7238 * @param pFpuCtx The FPU context.
7239 * @param u16FSW The FSW output of the current instruction.
7240 */
7241IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7242{
7243 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7244 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7245}
7246
7247
7248/**
7249 * Pops one item off the FPU stack if no pending exception prevents it.
7250 *
7251 * @param pFpuCtx The FPU context.
7252 */
7253IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7254{
7255 /* Check pending exceptions. */
7256 uint16_t uFSW = pFpuCtx->FSW;
7257 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7258 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7259 return;
7260
7261    /* TOP++ (pop): adding 9 modulo 8 advances the 3-bit TOP field by one. */
7262 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7263 uFSW &= ~X86_FSW_TOP_MASK;
7264 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7265 pFpuCtx->FSW = uFSW;
7266
7267 /* Mark the previous ST0 as empty. */
7268 iOldTop >>= X86_FSW_TOP_SHIFT;
7269 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7270
7271 /* Rotate the registers. */
7272 iemFpuRotateStackPop(pFpuCtx);
7273}
7274
7275
7276/**
7277 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7278 *
7279 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7280 * @param pResult The FPU operation result to push.
7281 */
7282IEM_STATIC void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult)
7283{
7284 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7285 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7286 iemFpuMaybePushResult(pResult, pFpuCtx);
7287}
7288
7289
7290/**
7291 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7292 * and sets FPUDP and FPUDS.
7293 *
7294 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7295 * @param pResult The FPU operation result to push.
7296 * @param iEffSeg The effective segment register.
7297 * @param GCPtrEff The effective address relative to @a iEffSeg.
7298 */
7299IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7300{
7301 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7302 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7303 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7304 iemFpuMaybePushResult(pResult, pFpuCtx);
7305}
7306
7307
7308/**
7309 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7310 * unless a pending exception prevents it.
7311 *
7312 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7313 * @param pResult The FPU operation result to store and push.
7314 */
7315IEM_STATIC void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult)
7316{
7317 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7318 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7319
7320 /* Update FSW and bail if there are pending exceptions afterwards. */
7321 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7322 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7323 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7324 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7325 {
7326 pFpuCtx->FSW = fFsw;
7327 return;
7328 }
7329
7330 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7331 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7332 {
7333 /* All is fine, push the actual value. */
7334 pFpuCtx->FTW |= RT_BIT(iNewTop);
7335 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7336 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7337 }
7338 else if (pFpuCtx->FCW & X86_FCW_IM)
7339 {
7340 /* Masked stack overflow, push QNaN. */
7341 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7342 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7343 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7344 }
7345 else
7346 {
7347 /* Raise stack overflow, don't push anything. */
7348 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7349 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7350 return;
7351 }
7352
7353 fFsw &= ~X86_FSW_TOP_MASK;
7354 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7355 pFpuCtx->FSW = fFsw;
7356
7357 iemFpuRotateStackPush(pFpuCtx);
7358}
7359
7360
7361/**
7362 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7363 * FOP.
7364 *
7365 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7366 * @param pResult The result to store.
7367 * @param iStReg Which FPU register to store it in.
7368 */
7369IEM_STATIC void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7370{
7371 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7372 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7373 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7374}
7375
7376
7377/**
7378 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7379 * FOP, and then pops the stack.
7380 *
7381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7382 * @param pResult The result to store.
7383 * @param iStReg Which FPU register to store it in.
7384 */
7385IEM_STATIC void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7386{
7387 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7388 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7389 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7390 iemFpuMaybePopOne(pFpuCtx);
7391}
7392
7393
7394/**
7395 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7396 * FPUDP, and FPUDS.
7397 *
7398 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7399 * @param pResult The result to store.
7400 * @param iStReg Which FPU register to store it in.
7401 * @param iEffSeg The effective memory operand selector register.
7402 * @param GCPtrEff The effective memory operand offset.
7403 */
7404IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7405 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7406{
7407 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7408 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7409 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7410 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7411}
7412
7413
7414/**
7415 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7416 * FPUDP, and FPUDS, and then pops the stack.
7417 *
7418 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7419 * @param pResult The result to store.
7420 * @param iStReg Which FPU register to store it in.
7421 * @param iEffSeg The effective memory operand selector register.
7422 * @param GCPtrEff The effective memory operand offset.
7423 */
7424IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
7425 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7426{
7427 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7428 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7429 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7430 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7431 iemFpuMaybePopOne(pFpuCtx);
7432}
7433
7434
7435/**
7436 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7437 *
7438 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7439 */
7440IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu)
7441{
7442 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7443 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7444}
7445
7446
7447/**
7448 * Marks the specified stack register as free (for FFREE).
7449 *
7450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7451 * @param iStReg The register to free.
7452 */
7453IEM_STATIC void iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg)
7454{
7455 Assert(iStReg < 8);
7456 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7457 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7458 pFpuCtx->FTW &= ~RT_BIT(iReg);
7459}
7460
7461
7462/**
7463 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7464 *
7465 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7466 */
7467IEM_STATIC void iemFpuStackIncTop(PVMCPUCC pVCpu)
7468{
7469 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7470 uint16_t uFsw = pFpuCtx->FSW;
7471 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7472 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7473 uFsw &= ~X86_FSW_TOP_MASK;
7474 uFsw |= uTop;
7475 pFpuCtx->FSW = uFsw;
7476}
7477
7478
7479/**
7480 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7481 *
7482 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7483 */
7484IEM_STATIC void iemFpuStackDecTop(PVMCPUCC pVCpu)
7485{
7486 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7487 uint16_t uFsw = pFpuCtx->FSW;
7488 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7489 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7490 uFsw &= ~X86_FSW_TOP_MASK;
7491 uFsw |= uTop;
7492 pFpuCtx->FSW = uFsw;
7493}
7494
7495
7496/**
7497 * Updates the FSW, FOP, FPUIP, and FPUCS.
7498 *
7499 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7500 * @param u16FSW The FSW from the current instruction.
7501 */
7502IEM_STATIC void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW)
7503{
7504 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7505 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7506 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7507}
7508
7509
7510/**
7511 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7512 *
7513 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7514 * @param u16FSW The FSW from the current instruction.
7515 */
7516IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7517{
7518 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7519 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7520 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7521 iemFpuMaybePopOne(pFpuCtx);
7522}
7523
7524
7525/**
7526 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7527 *
7528 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7529 * @param u16FSW The FSW from the current instruction.
7530 * @param iEffSeg The effective memory operand selector register.
7531 * @param GCPtrEff The effective memory operand offset.
7532 */
7533IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7534{
7535 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7536 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7537 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7538 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7539}
7540
7541
7542/**
7543 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7544 *
7545 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7546 * @param u16FSW The FSW from the current instruction.
7547 */
7548IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7549{
7550 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7551 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7552 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7553 iemFpuMaybePopOne(pFpuCtx);
7554 iemFpuMaybePopOne(pFpuCtx);
7555}
7556
7557
7558/**
7559 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7560 *
7561 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7562 * @param u16FSW The FSW from the current instruction.
7563 * @param iEffSeg The effective memory operand selector register.
7564 * @param GCPtrEff The effective memory operand offset.
7565 */
7566IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7567{
7568 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7569 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7570 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7571 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7572 iemFpuMaybePopOne(pFpuCtx);
7573}
7574
7575
7576/**
7577 * Worker routine for raising an FPU stack underflow exception.
7578 *
7579 * @param pFpuCtx The FPU context.
7580 * @param iStReg The stack register being accessed.
7581 */
7582IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7583{
7584 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7585 if (pFpuCtx->FCW & X86_FCW_IM)
7586 {
7587 /* Masked underflow. */
7588 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7589 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7590 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7591 if (iStReg != UINT8_MAX)
7592 {
7593 pFpuCtx->FTW |= RT_BIT(iReg);
7594 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7595 }
7596 }
7597 else
7598 {
7599 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7600 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7601 }
7602}
7603
7604
7605/**
7606 * Raises a FPU stack underflow exception.
7607 *
7608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7609 * @param iStReg The destination register that should be loaded
7610 * with QNaN if \#IS is not masked. Specify
7611 * UINT8_MAX if none (like for fcom).
7612 */
7613DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg)
7614{
7615 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7616 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7617 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7618}
7619
7620
7621DECL_NO_INLINE(IEM_STATIC, void)
7622iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7623{
7624 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7625 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7626 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7627 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7628}
7629
7630
7631DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg)
7632{
7633 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7634 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7635 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7636 iemFpuMaybePopOne(pFpuCtx);
7637}
7638
7639
7640DECL_NO_INLINE(IEM_STATIC, void)
7641iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7642{
7643 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7644 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7645 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7646 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7647 iemFpuMaybePopOne(pFpuCtx);
7648}
7649
7650
7651DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu)
7652{
7653 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7654 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7655 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7656 iemFpuMaybePopOne(pFpuCtx);
7657 iemFpuMaybePopOne(pFpuCtx);
7658}
7659
7660
7661DECL_NO_INLINE(IEM_STATIC, void)
7662iemFpuStackPushUnderflow(PVMCPUCC pVCpu)
7663{
7664 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7665 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7666
7667 if (pFpuCtx->FCW & X86_FCW_IM)
7668 {
7669        /* Masked stack underflow - push QNaN. */
7670 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7671 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7672 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7673 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7674 pFpuCtx->FTW |= RT_BIT(iNewTop);
7675 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7676 iemFpuRotateStackPush(pFpuCtx);
7677 }
7678 else
7679 {
7680 /* Exception pending - don't change TOP or the register stack. */
7681 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7682 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7683 }
7684}
7685
7686
7687DECL_NO_INLINE(IEM_STATIC, void)
7688iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu)
7689{
7690 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7691 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7692
7693 if (pFpuCtx->FCW & X86_FCW_IM)
7694 {
7695        /* Masked stack underflow - push QNaN. */
7696 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7697 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7698 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7699 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7700 pFpuCtx->FTW |= RT_BIT(iNewTop);
7701 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7702 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7703 iemFpuRotateStackPush(pFpuCtx);
7704 }
7705 else
7706 {
7707 /* Exception pending - don't change TOP or the register stack. */
7708 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7709 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7710 }
7711}
7712
7713
7714/**
7715 * Worker routine for raising an FPU stack overflow exception on a push.
7716 *
7717 * @param pFpuCtx The FPU context.
7718 */
7719IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7720{
7721 if (pFpuCtx->FCW & X86_FCW_IM)
7722 {
7723 /* Masked overflow. */
7724 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7725 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7726 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7727 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7728 pFpuCtx->FTW |= RT_BIT(iNewTop);
7729 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7730 iemFpuRotateStackPush(pFpuCtx);
7731 }
7732 else
7733 {
7734 /* Exception pending - don't change TOP or the register stack. */
7735 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7736 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7737 }
7738}
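
/*
 * Note on the TOP arithmetic used by the push helpers above (informal):
 * a push decrements TOP modulo 8, which is what (TOP + 7) & X86_FSW_TOP_SMASK
 * expresses without branching, e.g. TOP=0 becomes 7 and TOP=3 becomes 2.
 * The QNaN is written to aRegs[7], i.e. the slot that becomes ST(0) once
 * iemFpuRotateStackPush has rotated the register array, while the FTW bit is
 * set for the absolute register index iNewTop.
 */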
7739
7740
7741/**
7742 * Raises an FPU stack overflow exception on a push.
7743 *
7744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7745 */
7746DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPUCC pVCpu)
7747{
7748 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7749 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7750 iemFpuStackPushOverflowOnly(pFpuCtx);
7751}
7752
7753
7754/**
7755 * Raises a FPU stack overflow exception on a push with a memory operand.
7756 *
7757 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7758 * @param iEffSeg The effective memory operand selector register.
7759 * @param GCPtrEff The effective memory operand offset.
7760 */
7761DECL_NO_INLINE(IEM_STATIC, void)
7762iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7763{
7764 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7765 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7766 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7767 iemFpuStackPushOverflowOnly(pFpuCtx);
7768}
7769
7770
7771IEM_STATIC int iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg)
7772{
7773 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7774 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7775 if (pFpuCtx->FTW & RT_BIT(iReg))
7776 return VINF_SUCCESS;
7777 return VERR_NOT_FOUND;
7778}
7779
7780
7781IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7782{
7783 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7784 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7785 if (pFpuCtx->FTW & RT_BIT(iReg))
7786 {
7787 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7788 return VINF_SUCCESS;
7789 }
7790 return VERR_NOT_FOUND;
7791}
7792
7793
7794IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7795 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7796{
7797 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7798 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7799 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7800 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7801 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7802 {
7803 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7804 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7805 return VINF_SUCCESS;
7806 }
7807 return VERR_NOT_FOUND;
7808}
7809
7810
7811IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7812{
7813 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7814 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7815 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7816 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7817 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7818 {
7819 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7820 return VINF_SUCCESS;
7821 }
7822 return VERR_NOT_FOUND;
7823}
7824
7825
7826/**
7827 * Updates the FPU exception status after FCW is changed.
7828 *
7829 * @param pFpuCtx The FPU context.
7830 */
7831IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7832{
7833 uint16_t u16Fsw = pFpuCtx->FSW;
7834 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7835 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7836 else
7837 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7838 pFpuCtx->FSW = u16Fsw;
7839}
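
/*
 * Informal example of the recalculation above: with FSW=0x0001 (IE pending)
 * and FCW=0x037e (IM just cleared by the guest), the pending exception is no
 * longer masked, so ES and B get set and FSW becomes 0x8081.  Re-masking IM
 * (FCW=0x037f) would clear ES and B again.  Values are illustrative only.
 */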
7840
7841
7842/**
7843 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7844 *
7845 * @returns The full FTW.
7846 * @param pFpuCtx The FPU context.
7847 */
7848IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7849{
7850 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7851 uint16_t u16Ftw = 0;
7852 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7853 for (unsigned iSt = 0; iSt < 8; iSt++)
7854 {
7855 unsigned const iReg = (iSt + iTop) & 7;
7856 if (!(u8Ftw & RT_BIT(iReg)))
7857 u16Ftw |= 3 << (iReg * 2); /* empty */
7858 else
7859 {
7860 uint16_t uTag;
7861 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7862 if (pr80Reg->s.uExponent == 0x7fff)
7863 uTag = 2; /* Exponent is all 1's => Special. */
7864 else if (pr80Reg->s.uExponent == 0x0000)
7865 {
7866 if (pr80Reg->s.u64Mantissa == 0x0000)
7867 uTag = 1; /* All bits are zero => Zero. */
7868 else
7869 uTag = 2; /* Must be special. */
7870 }
7871 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7872 uTag = 0; /* Valid. */
7873 else
7874 uTag = 2; /* Must be special. */
7875
7876 u16Ftw |= uTag << (iReg * 2);
7877 }
7878 }
7879
7880 return u16Ftw;
7881}
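
/*
 * For reference, the two-bit tags produced above use the standard x87
 * encoding: 00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal,
 * unnormal), 11 = empty.  The loop iterates over ST(0)..ST(7) but stores each
 * tag at its absolute register position (iReg), which is the layout the
 * FNSTENV/FNSAVE image expects.
 */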
7882
7883
7884/**
7885 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7886 *
7887 * @returns The compressed FTW.
7888 * @param u16FullFtw The full FTW to convert.
7889 */
7890IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7891{
7892 uint8_t u8Ftw = 0;
7893 for (unsigned i = 0; i < 8; i++)
7894 {
7895 if ((u16FullFtw & 3) != 3 /*empty*/)
7896 u8Ftw |= RT_BIT(i);
7897 u16FullFtw >>= 2;
7898 }
7899
7900 return u8Ftw;
7901}
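
/*
 * Informal example of the compression above: a full FTW of 0xfff1 (register 0
 * tagged zero, register 1 tagged valid, registers 2 thru 7 empty) becomes
 * 0x03, i.e. one "not empty" bit per register as used by the abridged FXSAVE
 * tag byte and by pFpuCtx->FTW internally.
 */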
7902
7903/** @} */
7904
7905
7906/** @name Memory access.
7907 *
7908 * @{
7909 */
7910
7911
7912/**
7913 * Updates the IEMCPU::cbWritten counter if applicable.
7914 *
7915 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7916 * @param fAccess The access being accounted for.
7917 * @param cbMem The access size.
7918 */
7919DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
7920{
7921 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7922 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7923 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7924}
7925
7926
7927/**
7928 * Checks if the given segment can be written to, raising the appropriate
7929 * exception if not.
7930 *
7931 * @returns VBox strict status code.
7932 *
7933 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7934 * @param pHid Pointer to the hidden register.
7935 * @param iSegReg The register number.
7936 * @param pu64BaseAddr Where to return the base address to use for the
7937 * segment. (In 64-bit code it may differ from the
7938 * base in the hidden segment.)
7939 */
7940IEM_STATIC VBOXSTRICTRC
7941iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7942{
7943 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7944
7945 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7946 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7947 else
7948 {
7949 if (!pHid->Attr.n.u1Present)
7950 {
7951 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7952 AssertRelease(uSel == 0);
7953 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7954 return iemRaiseGeneralProtectionFault0(pVCpu);
7955 }
7956
7957 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7958 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7959 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7960 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7961 *pu64BaseAddr = pHid->u64Base;
7962 }
7963 return VINF_SUCCESS;
7964}
7965
7966
7967/**
7968 * Checks if the given segment can be read from, raising the appropriate
7969 * exception if not.
7970 *
7971 * @returns VBox strict status code.
7972 *
7973 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7974 * @param pHid Pointer to the hidden register.
7975 * @param iSegReg The register number.
7976 * @param pu64BaseAddr Where to return the base address to use for the
7977 * segment. (In 64-bit code it may differ from the
7978 * base in the hidden segment.)
7979 */
7980IEM_STATIC VBOXSTRICTRC
7981iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7982{
7983 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7984
7985 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7986 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7987 else
7988 {
7989 if (!pHid->Attr.n.u1Present)
7990 {
7991 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7992 AssertRelease(uSel == 0);
7993 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7994 return iemRaiseGeneralProtectionFault0(pVCpu);
7995 }
7996
7997 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
7998 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
7999 *pu64BaseAddr = pHid->u64Base;
8000 }
8001 return VINF_SUCCESS;
8002}
8003
8004
8005/**
8006 * Applies the segment limit, base and attributes.
8007 *
8008 * This may raise a \#GP or \#SS.
8009 *
8010 * @returns VBox strict status code.
8011 *
8012 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8013 * @param fAccess The kind of access which is being performed.
8014 * @param iSegReg The index of the segment register to apply.
8015 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8016 * TSS, ++).
8017 * @param cbMem The access size.
8018 * @param pGCPtrMem Pointer to the guest memory address to apply
8019 * segmentation to. Input and output parameter.
8020 */
8021IEM_STATIC VBOXSTRICTRC
8022iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8023{
8024 if (iSegReg == UINT8_MAX)
8025 return VINF_SUCCESS;
8026
8027 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8028 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8029 switch (pVCpu->iem.s.enmCpuMode)
8030 {
8031 case IEMMODE_16BIT:
8032 case IEMMODE_32BIT:
8033 {
8034 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8035 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8036
8037 if ( pSel->Attr.n.u1Present
8038 && !pSel->Attr.n.u1Unusable)
8039 {
8040 Assert(pSel->Attr.n.u1DescType);
8041 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8042 {
8043 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8044 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8045 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8046
8047 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8048 {
8049 /** @todo CPL check. */
8050 }
8051
8052 /*
8053 * There are two kinds of data selectors, normal and expand down.
8054 */
8055 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8056 {
8057 if ( GCPtrFirst32 > pSel->u32Limit
8058 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8059 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8060 }
8061 else
8062 {
8063 /*
8064 * The upper boundary is defined by the B bit, not the G bit!
8065 */
8066 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8067 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8068 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8069 }
8070 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8071 }
8072 else
8073 {
8074
8075 /*
8076 * A code selector can usually be read through; writing is only
8077 * permitted in real and V8086 mode.
8078 */
8079 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8080 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8081 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8082 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8083 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8084
8085 if ( GCPtrFirst32 > pSel->u32Limit
8086 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8087 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8088
8089 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8090 {
8091 /** @todo CPL check. */
8092 }
8093
8094 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8095 }
8096 }
8097 else
8098 return iemRaiseGeneralProtectionFault0(pVCpu);
8099 return VINF_SUCCESS;
8100 }
8101
8102 case IEMMODE_64BIT:
8103 {
8104 RTGCPTR GCPtrMem = *pGCPtrMem;
8105 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8106 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8107
8108 Assert(cbMem >= 1);
8109 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8110 return VINF_SUCCESS;
8111 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8112 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8113 return iemRaiseGeneralProtectionFault0(pVCpu);
8114 }
8115
8116 default:
8117 AssertFailedReturn(VERR_IEM_IPE_7);
8118 }
8119}
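
/*
 * Worked example for the expand-down branch above (informal, assuming a
 * 32-bit data segment with the B bit set and u32Limit=0x00000fff): valid
 * offsets are 0x1000 thru 0xffffffff, so a 4 byte access at 0x00000ffe fails
 * the GCPtrFirst32 < u32Limit + 1 check, while the same access at 0x00001000
 * (last byte 0x00001003) is accepted and gets the segment base added.
 */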
8120
8121
8122/**
8123 * Translates a virtual address to a physical address and checks if we
8124 * can access the page as specified.
8125 *
8126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8127 * @param GCPtrMem The virtual address.
8128 * @param fAccess The intended access.
8129 * @param pGCPhysMem Where to return the physical address.
8130 */
8131IEM_STATIC VBOXSTRICTRC
8132iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8133{
8134 /** @todo Need a different PGM interface here. We're currently using
8135 * generic / REM interfaces. This won't cut it for R0. */
8136 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8137 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
8138 * here. */
8139 PGMPTWALK Walk;
8140 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
8141 if (RT_FAILURE(rc))
8142 {
8143 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8144 /** @todo Check unassigned memory in unpaged mode. */
8145 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8146 *pGCPhysMem = NIL_RTGCPHYS;
8147 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8148 }
8149
8150 /* If the page is writable, user-accessible and does not have the no-exec
8151 bit set, all access is allowed. Otherwise we'll have to check more carefully... */
8152 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8153 {
8154 /* Write to read only memory? */
8155 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8156 && !(Walk.fEffective & X86_PTE_RW)
8157 && ( ( pVCpu->iem.s.uCpl == 3
8158 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8159 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8160 {
8161 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8162 *pGCPhysMem = NIL_RTGCPHYS;
8163 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8164 }
8165
8166 /* Kernel memory accessed by userland? */
8167 if ( !(Walk.fEffective & X86_PTE_US)
8168 && pVCpu->iem.s.uCpl == 3
8169 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8170 {
8171 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8172 *pGCPhysMem = NIL_RTGCPHYS;
8173 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8174 }
8175
8176 /* Executing non-executable memory? */
8177 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8178 && (Walk.fEffective & X86_PTE_PAE_NX)
8179 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8180 {
8181 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8182 *pGCPhysMem = NIL_RTGCPHYS;
8183 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8184 VERR_ACCESS_DENIED);
8185 }
8186 }
8187
8188 /*
8189 * Set the dirty / access flags.
8190 * ASSUMES this is set when the address is translated rather than on commit...
8191 */
8192 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8193 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8194 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
8195 {
8196 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8197 AssertRC(rc2);
8198 }
8199
8200 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & PAGE_OFFSET_MASK);
8201 *pGCPhysMem = GCPhys;
8202 return VINF_SUCCESS;
8203}
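
/*
 * Rough summary of the checks above: the fast path accepts pages that are
 * writable, user-accessible and not marked no-execute; otherwise the access
 * is inspected case by case:
 *      - write to a read-only page:     #PF for CPL=3 non-SYS accesses, or
 *                                       for any CPL when CR0.WP is set.
 *      - CPL=3 non-SYS access to a supervisor page: #PF.
 *      - instruction fetch from an NX page with EFER.NXE set: #PF.
 * On success the accessed (and, for writes, dirty) bits are set via
 * PGMGstModifyPage before the physical address is returned.
 */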
8204
8205
8206
8207/**
8208 * Maps a physical page.
8209 *
8210 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8211 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8212 * @param GCPhysMem The physical address.
8213 * @param fAccess The intended access.
8214 * @param ppvMem Where to return the mapping address.
8215 * @param pLock The PGM lock.
8216 */
8217IEM_STATIC int iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8218{
8219#ifdef IEM_LOG_MEMORY_WRITES
8220 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8221 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8222#endif
8223
8224 /** @todo This API may require some improving later. A private deal with PGM
8225 * regarding locking and unlocking needs to be struck. A couple of TLBs
8226 * living in PGM, but with publicly accessible inlined access methods
8227 * could perhaps be an even better solution. */
8228 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8229 GCPhysMem,
8230 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8231 pVCpu->iem.s.fBypassHandlers,
8232 ppvMem,
8233 pLock);
8234 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8235 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8236
8237 return rc;
8238}
8239
8240
8241/**
8242 * Unmap a page previously mapped by iemMemPageMap.
8243 *
8244 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8245 * @param GCPhysMem The physical address.
8246 * @param fAccess The intended access.
8247 * @param pvMem What iemMemPageMap returned.
8248 * @param pLock The PGM lock.
8249 */
8250DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8251{
8252 NOREF(pVCpu);
8253 NOREF(GCPhysMem);
8254 NOREF(fAccess);
8255 NOREF(pvMem);
8256 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8257}
8258
8259
8260/**
8261 * Looks up a memory mapping entry.
8262 *
8263 * @returns The mapping index (positive) or VERR_NOT_FOUND (negative).
8264 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8265 * @param pvMem The memory address.
8266 * @param fAccess The access flags to match (IEM_ACCESS_WHAT_XXX and IEM_ACCESS_TYPE_XXX).
8267 */
8268DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8269{
8270 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8271 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8272 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8273 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8274 return 0;
8275 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8276 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8277 return 1;
8278 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8279 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8280 return 2;
8281 return VERR_NOT_FOUND;
8282}
8283
8284
8285/**
8286 * Finds a free memmap entry when using iNextMapping doesn't work.
8287 *
8288 * @returns Memory mapping index, 1024 on failure.
8289 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8290 */
8291IEM_STATIC unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
8292{
8293 /*
8294 * The easy case.
8295 */
8296 if (pVCpu->iem.s.cActiveMappings == 0)
8297 {
8298 pVCpu->iem.s.iNextMapping = 1;
8299 return 0;
8300 }
8301
8302 /* There should be enough mappings for all instructions. */
8303 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8304
8305 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8306 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8307 return i;
8308
8309 AssertFailedReturn(1024);
8310}
8311
8312
8313/**
8314 * Commits a bounce buffer that needs writing back and unmaps it.
8315 *
8316 * @returns Strict VBox status code.
8317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8318 * @param iMemMap The index of the buffer to commit.
8319 * @param fPostponeFail Whether we can postpone write failures to ring-3.
8320 * Always false in ring-3, obviously.
8321 */
8322IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
8323{
8324 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8325 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8326#ifdef IN_RING3
8327 Assert(!fPostponeFail);
8328 RT_NOREF_PV(fPostponeFail);
8329#endif
8330
8331 /*
8332 * Do the writing.
8333 */
8334 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8335 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8336 {
8337 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8338 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8339 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8340 if (!pVCpu->iem.s.fBypassHandlers)
8341 {
8342 /*
8343 * Carefully and efficiently dealing with access handler return
8344 * codes makes this a little bloated.
8345 */
8346 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8347 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8348 pbBuf,
8349 cbFirst,
8350 PGMACCESSORIGIN_IEM);
8351 if (rcStrict == VINF_SUCCESS)
8352 {
8353 if (cbSecond)
8354 {
8355 rcStrict = PGMPhysWrite(pVM,
8356 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8357 pbBuf + cbFirst,
8358 cbSecond,
8359 PGMACCESSORIGIN_IEM);
8360 if (rcStrict == VINF_SUCCESS)
8361 { /* nothing */ }
8362 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8363 {
8364 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8365 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8366 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8367 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8368 }
8369#ifndef IN_RING3
8370 else if (fPostponeFail)
8371 {
8372 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8373 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8374 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8375 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8376 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8377 return iemSetPassUpStatus(pVCpu, rcStrict);
8378 }
8379#endif
8380 else
8381 {
8382 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8383 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8384 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8385 return rcStrict;
8386 }
8387 }
8388 }
8389 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8390 {
8391 if (!cbSecond)
8392 {
8393 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8394 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8395 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8396 }
8397 else
8398 {
8399 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8400 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8401 pbBuf + cbFirst,
8402 cbSecond,
8403 PGMACCESSORIGIN_IEM);
8404 if (rcStrict2 == VINF_SUCCESS)
8405 {
8406 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8407 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8408 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8409 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8410 }
8411 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8412 {
8413 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8414 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8415 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8416 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8417 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8418 }
8419#ifndef IN_RING3
8420 else if (fPostponeFail)
8421 {
8422 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8423 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8424 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8425 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8426 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8427 return iemSetPassUpStatus(pVCpu, rcStrict);
8428 }
8429#endif
8430 else
8431 {
8432 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8433 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8435 return rcStrict2;
8436 }
8437 }
8438 }
8439#ifndef IN_RING3
8440 else if (fPostponeFail)
8441 {
8442 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8443 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8444 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8445 if (!cbSecond)
8446 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8447 else
8448 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8449 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8450 return iemSetPassUpStatus(pVCpu, rcStrict);
8451 }
8452#endif
8453 else
8454 {
8455 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8456 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8457 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8458 return rcStrict;
8459 }
8460 }
8461 else
8462 {
8463 /*
8464 * No access handlers, much simpler.
8465 */
8466 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8467 if (RT_SUCCESS(rc))
8468 {
8469 if (cbSecond)
8470 {
8471 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8472 if (RT_SUCCESS(rc))
8473 { /* likely */ }
8474 else
8475 {
8476 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8477 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8478 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8479 return rc;
8480 }
8481 }
8482 }
8483 else
8484 {
8485 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8486 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8487 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8488 return rc;
8489 }
8490 }
8491 }
8492
8493#if defined(IEM_LOG_MEMORY_WRITES)
8494 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8495 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8496 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8497 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8498 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8499 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8500
8501 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8502 g_cbIemWrote = cbWrote;
8503 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8504#endif
8505
8506 /*
8507 * Free the mapping entry.
8508 */
8509 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8510 Assert(pVCpu->iem.s.cActiveMappings != 0);
8511 pVCpu->iem.s.cActiveMappings--;
8512 return VINF_SUCCESS;
8513}
8514
8515
8516/**
8517 * iemMemMap worker that deals with a request crossing pages.
8518 */
8519IEM_STATIC VBOXSTRICTRC
8520iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8521{
8522 /*
8523 * Do the address translations.
8524 */
8525 RTGCPHYS GCPhysFirst;
8526 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8527 if (rcStrict != VINF_SUCCESS)
8528 return rcStrict;
8529
8530 RTGCPHYS GCPhysSecond;
8531 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8532 fAccess, &GCPhysSecond);
8533 if (rcStrict != VINF_SUCCESS)
8534 return rcStrict;
8535 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8536
8537 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8538
8539 /*
8540 * Read in the current memory content if it's a read, execute or partial
8541 * write access.
8542 */
8543 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8544 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8545 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8546
8547 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8548 {
8549 if (!pVCpu->iem.s.fBypassHandlers)
8550 {
8551 /*
8552 * Must carefully deal with access handler status codes here,
8553 * which makes the code a bit bloated.
8554 */
8555 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8556 if (rcStrict == VINF_SUCCESS)
8557 {
8558 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8559 if (rcStrict == VINF_SUCCESS)
8560 { /*likely */ }
8561 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8562 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8563 else
8564 {
8565 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8566 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8567 return rcStrict;
8568 }
8569 }
8570 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8571 {
8572 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8573 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8574 {
8575 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8576 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8577 }
8578 else
8579 {
8580 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8581 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8582 return rcStrict2;
8583 }
8584 }
8585 else
8586 {
8587 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8588 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8589 return rcStrict;
8590 }
8591 }
8592 else
8593 {
8594 /*
8595 * No informational status codes here, much more straightforward.
8596 */
8597 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8598 if (RT_SUCCESS(rc))
8599 {
8600 Assert(rc == VINF_SUCCESS);
8601 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8602 if (RT_SUCCESS(rc))
8603 Assert(rc == VINF_SUCCESS);
8604 else
8605 {
8606 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8607 return rc;
8608 }
8609 }
8610 else
8611 {
8612 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8613 return rc;
8614 }
8615 }
8616 }
8617#ifdef VBOX_STRICT
8618 else
8619 memset(pbBuf, 0xcc, cbMem);
8620 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8621 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8622#endif
8623
8624 /*
8625 * Commit the bounce buffer entry.
8626 */
8627 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8628 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8629 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8630 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8631 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8632 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8633 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8634 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8635 pVCpu->iem.s.cActiveMappings++;
8636
8637 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8638 *ppvMem = pbBuf;
8639 return VINF_SUCCESS;
8640}
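
/*
 * Informal example of the split above: a 4 byte access whose first byte sits
 * at page offset 0xffe yields cbFirstPage=2 and cbSecondPage=2; both halves
 * are placed back to back in the bounce buffer, and for writes the commit
 * path (iemMemBounceBufferCommitAndUnmap) later writes them out to
 * GCPhysFirst and GCPhysSecond respectively.
 */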
8641
8642
8643/**
8644 * iemMemMap worker that deals with iemMemPageMap failures.
8645 */
8646IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8647 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8648{
8649 /*
8650 * Filter out conditions we can handle and the ones which shouldn't happen.
8651 */
8652 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8653 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8654 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8655 {
8656 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8657 return rcMap;
8658 }
8659 pVCpu->iem.s.cPotentialExits++;
8660
8661 /*
8662 * Read in the current memory content if it's a read, execute or partial
8663 * write access.
8664 */
8665 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8666 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8667 {
8668 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8669 memset(pbBuf, 0xff, cbMem);
8670 else
8671 {
8672 int rc;
8673 if (!pVCpu->iem.s.fBypassHandlers)
8674 {
8675 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8676 if (rcStrict == VINF_SUCCESS)
8677 { /* nothing */ }
8678 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8679 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8680 else
8681 {
8682 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8683 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8684 return rcStrict;
8685 }
8686 }
8687 else
8688 {
8689 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8690 if (RT_SUCCESS(rc))
8691 { /* likely */ }
8692 else
8693 {
8694 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8695 GCPhysFirst, rc));
8696 return rc;
8697 }
8698 }
8699 }
8700 }
8701#ifdef VBOX_STRICT
8702 else
8703 memset(pbBuf, 0xcc, cbMem);
8704#endif
8705#ifdef VBOX_STRICT
8706 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8707 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8708#endif
8709
8710 /*
8711 * Commit the bounce buffer entry.
8712 */
8713 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8714 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8715 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8716 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8717 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8718 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8719 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8720 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8721 pVCpu->iem.s.cActiveMappings++;
8722
8723 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8724 *ppvMem = pbBuf;
8725 return VINF_SUCCESS;
8726}
8727
8728
8729
8730/**
8731 * Maps the specified guest memory for the given kind of access.
8732 *
8733 * This may be using bounce buffering of the memory if it's crossing a page
8734 * boundary or if there is an access handler installed for any of it. Because
8735 * of lock prefix guarantees, we're in for some extra clutter when this
8736 * happens.
8737 *
8738 * This may raise a \#GP, \#SS, \#PF or \#AC.
8739 *
8740 * @returns VBox strict status code.
8741 *
8742 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8743 * @param ppvMem Where to return the pointer to the mapped
8744 * memory.
8745 * @param cbMem The number of bytes to map. This is usually 1,
8746 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8747 * string operations it can be up to a page.
8748 * @param iSegReg The index of the segment register to use for
8749 * this access. The base and limits are checked.
8750 * Use UINT8_MAX to indicate that no segmentation
8751 * is required (for IDT, GDT and LDT accesses).
8752 * @param GCPtrMem The address of the guest memory.
8753 * @param fAccess How the memory is being accessed. The
8754 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8755 * how to map the memory, while the
8756 * IEM_ACCESS_WHAT_XXX bit is used when raising
8757 * exceptions.
8758 */
8759IEM_STATIC VBOXSTRICTRC
8760iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8761{
8762 /*
8763 * Check the input and figure out which mapping entry to use.
8764 */
8765 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8766 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8767 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8768
8769 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8770 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8771 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8772 {
8773 iMemMap = iemMemMapFindFree(pVCpu);
8774 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8775 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8776 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8777 pVCpu->iem.s.aMemMappings[2].fAccess),
8778 VERR_IEM_IPE_9);
8779 }
8780
8781 /*
8782 * Map the memory, checking that we can actually access it. If something
8783 * slightly complicated happens, fall back on bounce buffering.
8784 */
8785 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8786 if (rcStrict != VINF_SUCCESS)
8787 return rcStrict;
8788
8789 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8790 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8791
8792 RTGCPHYS GCPhysFirst;
8793 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8794 if (rcStrict != VINF_SUCCESS)
8795 return rcStrict;
8796
8797 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8798 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8799 if (fAccess & IEM_ACCESS_TYPE_READ)
8800 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8801
8802 void *pvMem;
8803 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8804 if (rcStrict != VINF_SUCCESS)
8805 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8806
8807 /*
8808 * Fill in the mapping table entry.
8809 */
8810 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8811 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8812 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8813 pVCpu->iem.s.cActiveMappings++;
8814
8815 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8816 *ppvMem = pvMem;
8817
8818 return VINF_SUCCESS;
8819}
8820
8821
8822/**
8823 * Commits the guest memory if bounce buffered and unmaps it.
8824 *
8825 * @returns Strict VBox status code.
8826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8827 * @param pvMem The mapping.
8828 * @param fAccess The kind of access.
8829 */
8830IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8831{
8832 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8833 AssertReturn(iMemMap >= 0, iMemMap);
8834
8835 /* If it's bounce buffered, we may need to write back the buffer. */
8836 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8837 {
8838 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8839 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8840 }
8841 /* Otherwise unlock it. */
8842 else
8843 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8844
8845 /* Free the entry. */
8846 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8847 Assert(pVCpu->iem.s.cActiveMappings != 0);
8848 pVCpu->iem.s.cActiveMappings--;
8849 return VINF_SUCCESS;
8850}
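
/*
 * The usual calling pattern for iemMemMap / iemMemCommitAndUnmap is the
 * map, access, commit sequence used by the data fetch and store helpers
 * further down in this file.  Informal sketch (error handling shortened):
 *
 *      uint32_t const *pu32Src;
 *      VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src),
 *                                        iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
 *      if (rcStrict == VINF_SUCCESS)
 *      {
 *          uint32_t const uValue = *pu32Src;
 *          rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
 *      }
 *
 * The commit step is what flushes any bounce buffered write and releases the
 * PGM page mapping lock, so it must not be skipped on the success path.
 */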
8851
8852#ifdef IEM_WITH_SETJMP
8853
8854/**
8855 * Maps the specified guest memory for the given kind of access, longjmp on
8856 * error.
8857 *
8858 * This may be using bounce buffering of the memory if it's crossing a page
8859 * boundary or if there is an access handler installed for any of it. Because
8860 * of lock prefix guarantees, we're in for some extra clutter when this
8861 * happens.
8862 *
8863 * This may raise a \#GP, \#SS, \#PF or \#AC.
8864 *
8865 * @returns Pointer to the mapped memory.
8866 *
8867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8868 * @param cbMem The number of bytes to map. This is usually 1,
8869 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8870 * string operations it can be up to a page.
8871 * @param iSegReg The index of the segment register to use for
8872 * this access. The base and limits are checked.
8873 * Use UINT8_MAX to indicate that no segmentation
8874 * is required (for IDT, GDT and LDT accesses).
8875 * @param GCPtrMem The address of the guest memory.
8876 * @param fAccess How the memory is being accessed. The
8877 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8878 * how to map the memory, while the
8879 * IEM_ACCESS_WHAT_XXX bit is used when raising
8880 * exceptions.
8881 */
8882IEM_STATIC void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8883{
8884 /*
8885 * Check the input and figure out which mapping entry to use.
8886 */
8887 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8888 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8889 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8890
8891 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8892 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8893 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8894 {
8895 iMemMap = iemMemMapFindFree(pVCpu);
8896 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8897 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8898 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8899 pVCpu->iem.s.aMemMappings[2].fAccess),
8900 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8901 }
8902
8903 /*
8904 * Map the memory, checking that we can actually access it. If something
8905 * slightly complicated happens, fall back on bounce buffering.
8906 */
8907 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8908 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8909 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8910
8911 /* Crossing a page boundary? */
8912 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8913 { /* No (likely). */ }
8914 else
8915 {
8916 void *pvMem;
8917 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8918 if (rcStrict == VINF_SUCCESS)
8919 return pvMem;
8920 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8921 }
8922
8923 RTGCPHYS GCPhysFirst;
8924 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8925 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8926 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8927
8928 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8929 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8930 if (fAccess & IEM_ACCESS_TYPE_READ)
8931 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8932
8933 void *pvMem;
8934 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8935 if (rcStrict == VINF_SUCCESS)
8936 { /* likely */ }
8937 else
8938 {
8939 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8940 if (rcStrict == VINF_SUCCESS)
8941 return pvMem;
8942 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8943 }
8944
8945 /*
8946 * Fill in the mapping table entry.
8947 */
8948 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8949 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8950 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8951 pVCpu->iem.s.cActiveMappings++;
8952
8953 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8954 return pvMem;
8955}
8956
8957
8958/**
8959 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8960 *
8961 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8962 * @param pvMem The mapping.
8963 * @param fAccess The kind of access.
8964 */
8965IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8966{
8967 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8968 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8969
8970 /* If it's bounce buffered, we may need to write back the buffer. */
8971 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8972 {
8973 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8974 {
8975 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8976 if (rcStrict == VINF_SUCCESS)
8977 return;
8978 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8979 }
8980 }
8981 /* Otherwise unlock it. */
8982 else
8983 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8984
8985 /* Free the entry. */
8986 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8987 Assert(pVCpu->iem.s.cActiveMappings != 0);
8988 pVCpu->iem.s.cActiveMappings--;
8989}
8990
8991#endif /* IEM_WITH_SETJMP */
8992
8993#ifndef IN_RING3
8994/**
8995 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
8996 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
8997 *
8998 * Allows the instruction to be completed and retired, while the IEM user will
8999 * return to ring-3 immediately afterwards and do the postponed writes there.
9000 *
9001 * @returns VBox status code (no strict statuses). Caller must check
9002 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9003 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9004 * @param pvMem The mapping.
9005 * @param fAccess The kind of access.
9006 */
9007IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
9008{
9009 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9010 AssertReturn(iMemMap >= 0, iMemMap);
9011
9012 /* If it's bounce buffered, we may need to write back the buffer. */
9013 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9014 {
9015 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9016 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9017 }
9018 /* Otherwise unlock it. */
9019 else
9020 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9021
9022 /* Free the entry. */
9023 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9024 Assert(pVCpu->iem.s.cActiveMappings != 0);
9025 pVCpu->iem.s.cActiveMappings--;
9026 return VINF_SUCCESS;
9027}
9028#endif
9029
9030
9031/**
9032 * Rolls back mappings, releasing page locks and such.
9033 *
9034 * The caller shall only call this after checking cActiveMappings.
9035 *
9037 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9038 */
9039IEM_STATIC void iemMemRollback(PVMCPUCC pVCpu)
9040{
9041 Assert(pVCpu->iem.s.cActiveMappings > 0);
9042
9043 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9044 while (iMemMap-- > 0)
9045 {
9046 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9047 if (fAccess != IEM_ACCESS_INVALID)
9048 {
9049 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9050 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9051 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9052 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9053 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9054 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9055 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9056 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9057 pVCpu->iem.s.cActiveMappings--;
9058 }
9059 }
9060}
9061
9062
9063/**
9064 * Fetches a data byte.
9065 *
9066 * @returns Strict VBox status code.
9067 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9068 * @param pu8Dst Where to return the byte.
9069 * @param iSegReg The index of the segment register to use for
9070 * this access. The base and limits are checked.
9071 * @param GCPtrMem The address of the guest memory.
9072 */
9073IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9074{
9075 /* The lazy approach for now... */
9076 uint8_t const *pu8Src;
9077 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9078 if (rc == VINF_SUCCESS)
9079 {
9080 *pu8Dst = *pu8Src;
9081 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9082 }
9083 return rc;
9084}
9085
9086
9087#ifdef IEM_WITH_SETJMP
9088/**
9089 * Fetches a data byte, longjmp on error.
9090 *
9091 * @returns The byte.
9092 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9093 * @param iSegReg The index of the segment register to use for
9094 * this access. The base and limits are checked.
9095 * @param GCPtrMem The address of the guest memory.
9096 */
9097DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9098{
9099 /* The lazy approach for now... */
9100 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9101 uint8_t const bRet = *pu8Src;
9102 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9103 return bRet;
9104}
9105#endif /* IEM_WITH_SETJMP */
9106
9107
9108/**
9109 * Fetches a data word.
9110 *
9111 * @returns Strict VBox status code.
9112 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9113 * @param pu16Dst Where to return the word.
9114 * @param iSegReg The index of the segment register to use for
9115 * this access. The base and limits are checked.
9116 * @param GCPtrMem The address of the guest memory.
9117 */
9118IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9119{
9120 /* The lazy approach for now... */
9121 uint16_t const *pu16Src;
9122 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9123 if (rc == VINF_SUCCESS)
9124 {
9125 *pu16Dst = *pu16Src;
9126 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9127 }
9128 return rc;
9129}
9130
9131
9132#ifdef IEM_WITH_SETJMP
9133/**
9134 * Fetches a data word, longjmp on error.
9135 *
9136 * @returns The word.
9137 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9138 * @param iSegReg The index of the segment register to use for
9139 * this access. The base and limits are checked.
9140 * @param GCPtrMem The address of the guest memory.
9141 */
9142DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9143{
9144 /* The lazy approach for now... */
9145 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9146 uint16_t const u16Ret = *pu16Src;
9147 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9148 return u16Ret;
9149}
9150#endif
9151
9152
9153/**
9154 * Fetches a data dword.
9155 *
9156 * @returns Strict VBox status code.
9157 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9158 * @param pu32Dst Where to return the dword.
9159 * @param iSegReg The index of the segment register to use for
9160 * this access. The base and limits are checked.
9161 * @param GCPtrMem The address of the guest memory.
9162 */
9163IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9164{
9165 /* The lazy approach for now... */
9166 uint32_t const *pu32Src;
9167 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9168 if (rc == VINF_SUCCESS)
9169 {
9170 *pu32Dst = *pu32Src;
9171 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9172 }
9173 return rc;
9174}
9175
9176
9177/**
9178 * Fetches a data dword and zero extends it to a qword.
9179 *
9180 * @returns Strict VBox status code.
9181 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9182 * @param pu64Dst Where to return the qword.
9183 * @param iSegReg The index of the segment register to use for
9184 * this access. The base and limits are checked.
9185 * @param GCPtrMem The address of the guest memory.
9186 */
9187IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9188{
9189 /* The lazy approach for now... */
9190 uint32_t const *pu32Src;
9191 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9192 if (rc == VINF_SUCCESS)
9193 {
9194 *pu64Dst = *pu32Src;
9195 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9196 }
9197 return rc;
9198}
9199
9200
9201#ifdef IEM_WITH_SETJMP
9202
9203IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9204{
9205 Assert(cbMem >= 1);
9206 Assert(iSegReg < X86_SREG_COUNT);
9207
9208 /*
9209 * 64-bit mode is simpler.
9210 */
9211 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9212 {
9213 if (iSegReg >= X86_SREG_FS)
9214 {
9215 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9216 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9217 GCPtrMem += pSel->u64Base;
9218 }
9219
9220 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9221 return GCPtrMem;
9222 }
9223 /*
9224 * 16-bit and 32-bit segmentation.
9225 */
9226 else
9227 {
9228 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9229 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9230 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9231 == X86DESCATTR_P /* data, expand up */
9232 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9233 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9234 {
9235 /* expand up */
9236 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9237 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9238 && GCPtrLast32 > (uint32_t)GCPtrMem))
9239 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9240 }
9241 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9242 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9243 {
9244 /* expand down */
9245 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9246 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9247 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9248 && GCPtrLast32 > (uint32_t)GCPtrMem))
9249 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9250 }
9251 else
9252 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9253 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9254 }
9255 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9256}
9257
9258
9259IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9260{
9261 Assert(cbMem >= 1);
9262 Assert(iSegReg < X86_SREG_COUNT);
9263
9264 /*
9265 * 64-bit mode is simpler.
9266 */
9267 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9268 {
9269 if (iSegReg >= X86_SREG_FS)
9270 {
9271 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9272 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9273 GCPtrMem += pSel->u64Base;
9274 }
9275
9276 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9277 return GCPtrMem;
9278 }
9279 /*
9280 * 16-bit and 32-bit segmentation.
9281 */
9282 else
9283 {
9284 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9285 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9286 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9287 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9288 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9289 {
9290 /* expand up */
9291 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9292 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9293 && GCPtrLast32 > (uint32_t)GCPtrMem))
9294 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9295 }
9296 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9297 {
9298 /* expand down */
9299 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9300 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9301 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9302 && GCPtrLast32 > (uint32_t)GCPtrMem))
9303 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9304 }
9305 else
9306 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9307 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9308 }
9309 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9310}
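
/*
 * Editor's note - illustrative sketch only, not part of the build: in the
 * 64-bit branches above only FS and GS contribute a segment base, and the
 * access is accepted when both the first and the last byte of the range
 * [GCPtrMem, GCPtrMem + cbMem - 1] are canonical.  The helpers below restate
 * that check with plain integers; their names and the 48-bit linear address
 * width are assumptions made for the example, not part of the IEM API.
 *
 * @code
 *     #include <stdbool.h>
 *     #include <stddef.h>
 *     #include <stdint.h>
 *
 *     // Canonical for 48-bit linear addresses: bits 63:47 must all be copies
 *     // of bit 47.  Adding 2^47 folds the two valid ranges into [0, 2^48),
 *     // so a single unsigned compare does the job.
 *     static bool sketchIsCanonical(uint64_t uAddr)
 *     {
 *         return uAddr + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000);
 *     }
 *
 *     // Mirrors the 64-bit path: add the (FS/GS) base, then require the first
 *     // and the last byte of the cb-byte access to be canonical.
 *     static bool sketchCanonicalAccessOk(uint64_t uSegBase, uint64_t off, size_t cb)
 *     {
 *         uint64_t const uFirst = uSegBase + off;
 *         return sketchIsCanonical(uFirst) && sketchIsCanonical(uFirst + cb - 1);
 *     }
 * @endcode
 */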
9311
9312
9313/**
9314 * Fetches a data dword, longjmp on error, fallback/safe version.
9315 *
9316 * @returns The dword
9317 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9318 * @param iSegReg The index of the segment register to use for
9319 * this access. The base and limits are checked.
9320 * @param GCPtrMem The address of the guest memory.
9321 */
9322IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9323{
9324 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9325 uint32_t const u32Ret = *pu32Src;
9326 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9327 return u32Ret;
9328}
9329
9330
9331/**
9332 * Fetches a data dword, longjmp on error.
9333 *
9334 * @returns The dword
9335 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9336 * @param iSegReg The index of the segment register to use for
9337 * this access. The base and limits are checked.
9338 * @param GCPtrMem The address of the guest memory.
9339 */
9340DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9341{
9342# ifdef IEM_WITH_DATA_TLB
9343 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9344 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9345 {
9346 /// @todo more later.
9347 }
9348
9349 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9350# else
9351 /* The lazy approach. */
9352 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9353 uint32_t const u32Ret = *pu32Src;
9354 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9355 return u32Ret;
9356# endif
9357}
9358#endif
9359
9360
9361#ifdef SOME_UNUSED_FUNCTION
9362/**
9363 * Fetches a data dword and sign extends it to a qword.
9364 *
9365 * @returns Strict VBox status code.
9366 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9367 * @param pu64Dst Where to return the sign extended value.
9368 * @param iSegReg The index of the segment register to use for
9369 * this access. The base and limits are checked.
9370 * @param GCPtrMem The address of the guest memory.
9371 */
9372IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9373{
9374 /* The lazy approach for now... */
9375 int32_t const *pi32Src;
9376 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9377 if (rc == VINF_SUCCESS)
9378 {
9379 *pu64Dst = *pi32Src;
9380 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9381 }
9382#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9383 else
9384 *pu64Dst = 0;
9385#endif
9386 return rc;
9387}
9388#endif
9389
9390
9391/**
9392 * Fetches a data qword.
9393 *
9394 * @returns Strict VBox status code.
9395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9396 * @param pu64Dst Where to return the qword.
9397 * @param iSegReg The index of the segment register to use for
9398 * this access. The base and limits are checked.
9399 * @param GCPtrMem The address of the guest memory.
9400 */
9401IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9402{
9403 /* The lazy approach for now... */
9404 uint64_t const *pu64Src;
9405 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9406 if (rc == VINF_SUCCESS)
9407 {
9408 *pu64Dst = *pu64Src;
9409 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9410 }
9411 return rc;
9412}
9413
9414
9415#ifdef IEM_WITH_SETJMP
9416/**
9417 * Fetches a data qword, longjmp on error.
9418 *
9419 * @returns The qword.
9420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9421 * @param iSegReg The index of the segment register to use for
9422 * this access. The base and limits are checked.
9423 * @param GCPtrMem The address of the guest memory.
9424 */
9425DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9426{
9427 /* The lazy approach for now... */
9428 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9429 uint64_t const u64Ret = *pu64Src;
9430 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9431 return u64Ret;
9432}
9433#endif
9434
9435
9436/**
9437 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9438 *
9439 * @returns Strict VBox status code.
9440 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9441 * @param pu64Dst Where to return the qword.
9442 * @param iSegReg The index of the segment register to use for
9443 * this access. The base and limits are checked.
9444 * @param GCPtrMem The address of the guest memory.
9445 */
9446IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9447{
9448 /* The lazy approach for now... */
9449 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9450 if (RT_UNLIKELY(GCPtrMem & 15))
9451 return iemRaiseGeneralProtectionFault0(pVCpu);
9452
9453 uint64_t const *pu64Src;
9454 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9455 if (rc == VINF_SUCCESS)
9456 {
9457 *pu64Dst = *pu64Src;
9458 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9459 }
9460 return rc;
9461}
9462
9463
9464#ifdef IEM_WITH_SETJMP
9465/**
9466 * Fetches a data qword, longjmp on error.
9467 *
9468 * @returns The qword.
9469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9470 * @param iSegReg The index of the segment register to use for
9471 * this access. The base and limits are checked.
9472 * @param GCPtrMem The address of the guest memory.
9473 */
9474DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9475{
9476 /* The lazy approach for now... */
9477 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9478 if (RT_LIKELY(!(GCPtrMem & 15)))
9479 {
9480 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9481 uint64_t const u64Ret = *pu64Src;
9482 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9483 return u64Ret;
9484 }
9485
9486 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9487 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9488}
9489#endif
9490
9491
9492/**
9493 * Fetches a data tword.
9494 *
9495 * @returns Strict VBox status code.
9496 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9497 * @param pr80Dst Where to return the tword.
9498 * @param iSegReg The index of the segment register to use for
9499 * this access. The base and limits are checked.
9500 * @param GCPtrMem The address of the guest memory.
9501 */
9502IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9503{
9504 /* The lazy approach for now... */
9505 PCRTFLOAT80U pr80Src;
9506 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9507 if (rc == VINF_SUCCESS)
9508 {
9509 *pr80Dst = *pr80Src;
9510 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9511 }
9512 return rc;
9513}
9514
9515
9516#ifdef IEM_WITH_SETJMP
9517/**
9518 * Fetches a data tword, longjmp on error.
9519 *
9520 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9521 * @param pr80Dst Where to return the tword.
9522 * @param iSegReg The index of the segment register to use for
9523 * this access. The base and limits are checked.
9524 * @param GCPtrMem The address of the guest memory.
9525 */
9526DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9527{
9528 /* The lazy approach for now... */
9529 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9530 *pr80Dst = *pr80Src;
9531 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9532}
9533#endif
9534
9535
9536/**
9537 * Fetches a data dqword (double qword), generally SSE related.
9538 *
9539 * @returns Strict VBox status code.
9540 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9541 * @param pu128Dst Where to return the dqword.
9542 * @param iSegReg The index of the segment register to use for
9543 * this access. The base and limits are checked.
9544 * @param GCPtrMem The address of the guest memory.
9545 */
9546IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9547{
9548 /* The lazy approach for now... */
9549 PCRTUINT128U pu128Src;
9550 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9551 if (rc == VINF_SUCCESS)
9552 {
9553 pu128Dst->au64[0] = pu128Src->au64[0];
9554 pu128Dst->au64[1] = pu128Src->au64[1];
9555 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9556 }
9557 return rc;
9558}
9559
9560
9561#ifdef IEM_WITH_SETJMP
9562/**
9563 * Fetches a data dqword (double qword), generally SSE related.
9564 *
9565 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9566 * @param pu128Dst Where to return the dqword.
9567 * @param iSegReg The index of the segment register to use for
9568 * this access. The base and limits are checked.
9569 * @param GCPtrMem The address of the guest memory.
9570 */
9571IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9572{
9573 /* The lazy approach for now... */
9574 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9575 pu128Dst->au64[0] = pu128Src->au64[0];
9576 pu128Dst->au64[1] = pu128Src->au64[1];
9577 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9578}
9579#endif
9580
9581
9582/**
9583 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9584 * related.
9585 *
9586 * Raises \#GP(0) if not aligned.
9587 *
9588 * @returns Strict VBox status code.
9589 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9590 * @param pu128Dst Where to return the dqword.
9591 * @param iSegReg The index of the segment register to use for
9592 * this access. The base and limits are checked.
9593 * @param GCPtrMem The address of the guest memory.
9594 */
9595IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9596{
9597 /* The lazy approach for now... */
9598 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9599 if ( (GCPtrMem & 15)
9600 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9601 return iemRaiseGeneralProtectionFault0(pVCpu);
9602
9603 PCRTUINT128U pu128Src;
9604 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9605 if (rc == VINF_SUCCESS)
9606 {
9607 pu128Dst->au64[0] = pu128Src->au64[0];
9608 pu128Dst->au64[1] = pu128Src->au64[1];
9609 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9610 }
9611 return rc;
9612}
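
/*
 * Editor's note - illustrative sketch only, not part of the build: the
 * alignment gate above raises \#GP(0) for a misaligned 16-byte SSE access
 * unless the guest has set MXCSR.MM (AMD's misaligned SSE mode).  A boolean
 * restatement of that predicate follows; the helper name is made up and the
 * bit-17 position of MXCSR.MM is the commonly documented one.
 *
 * @code
 *     #include <stdbool.h>
 *     #include <stdint.h>
 *
 *     // True when a 16-byte SSE access at guest address GCPtrMem must fault
 *     // with #GP(0): the address is not 16-byte aligned and MXCSR.MM is clear.
 *     static bool sketchSseAlignmentFaults(uint64_t GCPtrMem, uint32_t fMxcsr)
 *     {
 *         uint32_t const fMxcsrMM = UINT32_C(1) << 17; // MXCSR.MM (misaligned exception mask)
 *         return (GCPtrMem & 15) != 0 && !(fMxcsr & fMxcsrMM);
 *     }
 * @endcode
 */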
9613
9614
9615#ifdef IEM_WITH_SETJMP
9616/**
9617 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9618 * related, longjmp on error.
9619 *
9620 * Raises \#GP(0) if not aligned.
9621 *
9622 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9623 * @param pu128Dst Where to return the dqword.
9624 * @param iSegReg The index of the segment register to use for
9625 * this access. The base and limits are checked.
9626 * @param GCPtrMem The address of the guest memory.
9627 */
9628DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9629{
9630 /* The lazy approach for now... */
9631 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9632 if ( (GCPtrMem & 15) == 0
9633 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9634 {
9635 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9636 pu128Dst->au64[0] = pu128Src->au64[0];
9637 pu128Dst->au64[1] = pu128Src->au64[1];
9638 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9639 return;
9640 }
9641
9642 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9643 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9644}
9645#endif
9646
9647
9648/**
9649 * Fetches a data oword (octo word), generally AVX related.
9650 *
9651 * @returns Strict VBox status code.
9652 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9653 * @param pu256Dst Where to return the oword.
9654 * @param iSegReg The index of the segment register to use for
9655 * this access. The base and limits are checked.
9656 * @param GCPtrMem The address of the guest memory.
9657 */
9658IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9659{
9660 /* The lazy approach for now... */
9661 PCRTUINT256U pu256Src;
9662 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9663 if (rc == VINF_SUCCESS)
9664 {
9665 pu256Dst->au64[0] = pu256Src->au64[0];
9666 pu256Dst->au64[1] = pu256Src->au64[1];
9667 pu256Dst->au64[2] = pu256Src->au64[2];
9668 pu256Dst->au64[3] = pu256Src->au64[3];
9669 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9670 }
9671 return rc;
9672}
9673
9674
9675#ifdef IEM_WITH_SETJMP
9676/**
9677 * Fetches a data oword (octo word), generally AVX related.
9678 *
9679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9680 * @param pu256Dst Where to return the oword.
9681 * @param iSegReg The index of the segment register to use for
9682 * this access. The base and limits are checked.
9683 * @param GCPtrMem The address of the guest memory.
9684 */
9685IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9686{
9687 /* The lazy approach for now... */
9688 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9689 pu256Dst->au64[0] = pu256Src->au64[0];
9690 pu256Dst->au64[1] = pu256Src->au64[1];
9691 pu256Dst->au64[2] = pu256Src->au64[2];
9692 pu256Dst->au64[3] = pu256Src->au64[3];
9693 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9694}
9695#endif
9696
9697
9698/**
9699 * Fetches a data oword (octo word) at an aligned address, generally AVX
9700 * related.
9701 *
9702 * Raises \#GP(0) if not aligned.
9703 *
9704 * @returns Strict VBox status code.
9705 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9706 * @param pu256Dst Where to return the oword.
9707 * @param iSegReg The index of the segment register to use for
9708 * this access. The base and limits are checked.
9709 * @param GCPtrMem The address of the guest memory.
9710 */
9711IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9712{
9713 /* The lazy approach for now... */
9714 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9715 if (GCPtrMem & 31)
9716 return iemRaiseGeneralProtectionFault0(pVCpu);
9717
9718 PCRTUINT256U pu256Src;
9719 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9720 if (rc == VINF_SUCCESS)
9721 {
9722 pu256Dst->au64[0] = pu256Src->au64[0];
9723 pu256Dst->au64[1] = pu256Src->au64[1];
9724 pu256Dst->au64[2] = pu256Src->au64[2];
9725 pu256Dst->au64[3] = pu256Src->au64[3];
9726 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9727 }
9728 return rc;
9729}
9730
9731
9732#ifdef IEM_WITH_SETJMP
9733/**
9734 * Fetches a data oword (octo word) at an aligned address, generally AVX
9735 * related, longjmp on error.
9736 *
9737 * Raises \#GP(0) if not aligned.
9738 *
9739 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9740 * @param pu256Dst Where to return the oword.
9741 * @param iSegReg The index of the segment register to use for
9742 * this access. The base and limits are checked.
9743 * @param GCPtrMem The address of the guest memory.
9744 */
9745DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9746{
9747 /* The lazy approach for now... */
9748 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9749 if ((GCPtrMem & 31) == 0)
9750 {
9751 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9752 pu256Dst->au64[0] = pu256Src->au64[0];
9753 pu256Dst->au64[1] = pu256Src->au64[1];
9754 pu256Dst->au64[2] = pu256Src->au64[2];
9755 pu256Dst->au64[3] = pu256Src->au64[3];
9756 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9757 return;
9758 }
9759
9760 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9761 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9762}
9763#endif
9764
9765
9766
9767/**
9768 * Fetches a descriptor register (lgdt, lidt).
9769 *
9770 * @returns Strict VBox status code.
9771 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9772 * @param pcbLimit Where to return the limit.
9773 * @param pGCPtrBase Where to return the base.
9774 * @param iSegReg The index of the segment register to use for
9775 * this access. The base and limits are checked.
9776 * @param GCPtrMem The address of the guest memory.
9777 * @param enmOpSize The effective operand size.
9778 */
9779IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9780 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9781{
9782 /*
9783 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9784 * little special:
9785 * - The two reads are done separately.
9786 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit.
9787 * - We suspect the 386 to actually commit the limit before the base in
9788 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9789 * don't try to emulate this eccentric behavior, because it's not well
9790 * enough understood and rather hard to trigger.
9791 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9792 */
9793 VBOXSTRICTRC rcStrict;
9794 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9795 {
9796 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9797 if (rcStrict == VINF_SUCCESS)
9798 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9799 }
9800 else
9801 {
9802 uint32_t uTmp = 0; /* (Visual C++ maybe used uninitialized) */
9803 if (enmOpSize == IEMMODE_32BIT)
9804 {
9805 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9806 {
9807 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9808 if (rcStrict == VINF_SUCCESS)
9809 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9810 }
9811 else
9812 {
9813 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9814 if (rcStrict == VINF_SUCCESS)
9815 {
9816 *pcbLimit = (uint16_t)uTmp;
9817 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9818 }
9819 }
9820 if (rcStrict == VINF_SUCCESS)
9821 *pGCPtrBase = uTmp;
9822 }
9823 else
9824 {
9825 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9826 if (rcStrict == VINF_SUCCESS)
9827 {
9828 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9829 if (rcStrict == VINF_SUCCESS)
9830 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9831 }
9832 }
9833 }
9834 return rcStrict;
9835}
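
/*
 * Editor's note - hypothetical usage sketch, not taken from the IEM sources:
 * this is roughly how an LIDT/LGDT implementation might call the helper
 * above.  The variables iSegReg, GCPtrEffSrc and enmOpSize are assumed to be
 * supplied by the instruction decoder; only the call and the status handling
 * are shown.
 *
 * @code
 *     uint16_t     cbLimit;
 *     RTGCPTR      GCPtrBase;
 *     VBOXSTRICTRC rcStrict = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase,
 *                                                 iSegReg, GCPtrEffSrc, enmOpSize);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;            // #GP/#SS/#PF and informational statuses propagate as-is.
 *     // ... load cbLimit and GCPtrBase into the (I/G)DTR via CPUM ...
 * @endcode
 */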
9836
9837
9838
9839/**
9840 * Stores a data byte.
9841 *
9842 * @returns Strict VBox status code.
9843 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9844 * @param iSegReg The index of the segment register to use for
9845 * this access. The base and limits are checked.
9846 * @param GCPtrMem The address of the guest memory.
9847 * @param u8Value The value to store.
9848 */
9849IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9850{
9851 /* The lazy approach for now... */
9852 uint8_t *pu8Dst;
9853 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9854 if (rc == VINF_SUCCESS)
9855 {
9856 *pu8Dst = u8Value;
9857 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9858 }
9859 return rc;
9860}
9861
9862
9863#ifdef IEM_WITH_SETJMP
9864/**
9865 * Stores a data byte, longjmp on error.
9866 *
9867 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9868 * @param iSegReg The index of the segment register to use for
9869 * this access. The base and limits are checked.
9870 * @param GCPtrMem The address of the guest memory.
9871 * @param u8Value The value to store.
9872 */
9873IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9874{
9875 /* The lazy approach for now... */
9876 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9877 *pu8Dst = u8Value;
9878 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9879}
9880#endif
9881
9882
9883/**
9884 * Stores a data word.
9885 *
9886 * @returns Strict VBox status code.
9887 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9888 * @param iSegReg The index of the segment register to use for
9889 * this access. The base and limits are checked.
9890 * @param GCPtrMem The address of the guest memory.
9891 * @param u16Value The value to store.
9892 */
9893IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9894{
9895 /* The lazy approach for now... */
9896 uint16_t *pu16Dst;
9897 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9898 if (rc == VINF_SUCCESS)
9899 {
9900 *pu16Dst = u16Value;
9901 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9902 }
9903 return rc;
9904}
9905
9906
9907#ifdef IEM_WITH_SETJMP
9908/**
9909 * Stores a data word, longjmp on error.
9910 *
9911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9912 * @param iSegReg The index of the segment register to use for
9913 * this access. The base and limits are checked.
9914 * @param GCPtrMem The address of the guest memory.
9915 * @param u16Value The value to store.
9916 */
9917IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9918{
9919 /* The lazy approach for now... */
9920 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9921 *pu16Dst = u16Value;
9922 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9923}
9924#endif
9925
9926
9927/**
9928 * Stores a data dword.
9929 *
9930 * @returns Strict VBox status code.
9931 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9932 * @param iSegReg The index of the segment register to use for
9933 * this access. The base and limits are checked.
9934 * @param GCPtrMem The address of the guest memory.
9935 * @param u32Value The value to store.
9936 */
9937IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9938{
9939 /* The lazy approach for now... */
9940 uint32_t *pu32Dst;
9941 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9942 if (rc == VINF_SUCCESS)
9943 {
9944 *pu32Dst = u32Value;
9945 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9946 }
9947 return rc;
9948}
9949
9950
9951#ifdef IEM_WITH_SETJMP
9952/**
9953 * Stores a data dword, longjmp on error.
9954 *
9956 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9957 * @param iSegReg The index of the segment register to use for
9958 * this access. The base and limits are checked.
9959 * @param GCPtrMem The address of the guest memory.
9960 * @param u32Value The value to store.
9961 */
9962IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9963{
9964 /* The lazy approach for now... */
9965 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9966 *pu32Dst = u32Value;
9967 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9968}
9969#endif
9970
9971
9972/**
9973 * Stores a data qword.
9974 *
9975 * @returns Strict VBox status code.
9976 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9977 * @param iSegReg The index of the segment register to use for
9978 * this access. The base and limits are checked.
9979 * @param GCPtrMem The address of the guest memory.
9980 * @param u64Value The value to store.
9981 */
9982IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
9983{
9984 /* The lazy approach for now... */
9985 uint64_t *pu64Dst;
9986 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9987 if (rc == VINF_SUCCESS)
9988 {
9989 *pu64Dst = u64Value;
9990 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
9991 }
9992 return rc;
9993}
9994
9995
9996#ifdef IEM_WITH_SETJMP
9997/**
9998 * Stores a data qword, longjmp on error.
9999 *
10000 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10001 * @param iSegReg The index of the segment register to use for
10002 * this access. The base and limits are checked.
10003 * @param GCPtrMem The address of the guest memory.
10004 * @param u64Value The value to store.
10005 */
10006IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10007{
10008 /* The lazy approach for now... */
10009 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10010 *pu64Dst = u64Value;
10011 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10012}
10013#endif
10014
10015
10016/**
10017 * Stores a data dqword.
10018 *
10019 * @returns Strict VBox status code.
10020 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10021 * @param iSegReg The index of the segment register to use for
10022 * this access. The base and limits are checked.
10023 * @param GCPtrMem The address of the guest memory.
10024 * @param u128Value The value to store.
10025 */
10026IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10027{
10028 /* The lazy approach for now... */
10029 PRTUINT128U pu128Dst;
10030 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10031 if (rc == VINF_SUCCESS)
10032 {
10033 pu128Dst->au64[0] = u128Value.au64[0];
10034 pu128Dst->au64[1] = u128Value.au64[1];
10035 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10036 }
10037 return rc;
10038}
10039
10040
10041#ifdef IEM_WITH_SETJMP
10042/**
10043 * Stores a data dqword, longjmp on error.
10044 *
10045 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10046 * @param iSegReg The index of the segment register to use for
10047 * this access. The base and limits are checked.
10048 * @param GCPtrMem The address of the guest memory.
10049 * @param u128Value The value to store.
10050 */
10051IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10052{
10053 /* The lazy approach for now... */
10054 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10055 pu128Dst->au64[0] = u128Value.au64[0];
10056 pu128Dst->au64[1] = u128Value.au64[1];
10057 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10058}
10059#endif
10060
10061
10062/**
10063 * Stores a data dqword, SSE aligned.
10064 *
10065 * @returns Strict VBox status code.
10066 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10067 * @param iSegReg The index of the segment register to use for
10068 * this access. The base and limits are checked.
10069 * @param GCPtrMem The address of the guest memory.
10070 * @param u128Value The value to store.
10071 */
10072IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10073{
10074 /* The lazy approach for now... */
10075 if ( (GCPtrMem & 15)
10076 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10077 return iemRaiseGeneralProtectionFault0(pVCpu);
10078
10079 PRTUINT128U pu128Dst;
10080 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10081 if (rc == VINF_SUCCESS)
10082 {
10083 pu128Dst->au64[0] = u128Value.au64[0];
10084 pu128Dst->au64[1] = u128Value.au64[1];
10085 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10086 }
10087 return rc;
10088}
10089
10090
10091#ifdef IEM_WITH_SETJMP
10092/**
10093 * Stores a data dqword, SSE aligned, longjmp on error.
10094 *
10096 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10097 * @param iSegReg The index of the segment register to use for
10098 * this access. The base and limits are checked.
10099 * @param GCPtrMem The address of the guest memory.
10100 * @param u128Value The value to store.
10101 */
10102DECL_NO_INLINE(IEM_STATIC, void)
10103iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10104{
10105 /* The lazy approach for now... */
10106 if ( (GCPtrMem & 15) == 0
10107 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10108 {
10109 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10110 pu128Dst->au64[0] = u128Value.au64[0];
10111 pu128Dst->au64[1] = u128Value.au64[1];
10112 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10113 return;
10114 }
10115
10116 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10117 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10118}
10119#endif
10120
10121
10122/**
10123 * Stores a data oword (octo word).
10124 *
10125 * @returns Strict VBox status code.
10126 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10127 * @param iSegReg The index of the segment register to use for
10128 * this access. The base and limits are checked.
10129 * @param GCPtrMem The address of the guest memory.
10130 * @param pu256Value Pointer to the value to store.
10131 */
10132IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10133{
10134 /* The lazy approach for now... */
10135 PRTUINT256U pu256Dst;
10136 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10137 if (rc == VINF_SUCCESS)
10138 {
10139 pu256Dst->au64[0] = pu256Value->au64[0];
10140 pu256Dst->au64[1] = pu256Value->au64[1];
10141 pu256Dst->au64[2] = pu256Value->au64[2];
10142 pu256Dst->au64[3] = pu256Value->au64[3];
10143 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10144 }
10145 return rc;
10146}
10147
10148
10149#ifdef IEM_WITH_SETJMP
10150/**
10151 * Stores a data oword (octo word), longjmp on error.
10152 *
10153 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10154 * @param iSegReg The index of the segment register to use for
10155 * this access. The base and limits are checked.
10156 * @param GCPtrMem The address of the guest memory.
10157 * @param pu256Value Pointer to the value to store.
10158 */
10159IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10160{
10161 /* The lazy approach for now... */
10162 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10163 pu256Dst->au64[0] = pu256Value->au64[0];
10164 pu256Dst->au64[1] = pu256Value->au64[1];
10165 pu256Dst->au64[2] = pu256Value->au64[2];
10166 pu256Dst->au64[3] = pu256Value->au64[3];
10167 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10168}
10169#endif
10170
10171
10172/**
10173 * Stores a data oword (octo word), AVX aligned.
10174 *
10175 * @returns Strict VBox status code.
10176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10177 * @param iSegReg The index of the segment register to use for
10178 * this access. The base and limits are checked.
10179 * @param GCPtrMem The address of the guest memory.
10180 * @param pu256Value Pointer to the value to store.
10181 */
10182IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10183{
10184 /* The lazy approach for now... */
10185 if (GCPtrMem & 31)
10186 return iemRaiseGeneralProtectionFault0(pVCpu);
10187
10188 PRTUINT256U pu256Dst;
10189 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10190 if (rc == VINF_SUCCESS)
10191 {
10192 pu256Dst->au64[0] = pu256Value->au64[0];
10193 pu256Dst->au64[1] = pu256Value->au64[1];
10194 pu256Dst->au64[2] = pu256Value->au64[2];
10195 pu256Dst->au64[3] = pu256Value->au64[3];
10196 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10197 }
10198 return rc;
10199}
10200
10201
10202#ifdef IEM_WITH_SETJMP
10203/**
10204 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10205 *
10207 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10208 * @param iSegReg The index of the segment register to use for
10209 * this access. The base and limits are checked.
10210 * @param GCPtrMem The address of the guest memory.
10211 * @param pu256Value Pointer to the value to store.
10212 */
10213DECL_NO_INLINE(IEM_STATIC, void)
10214iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10215{
10216 /* The lazy approach for now... */
10217 if ((GCPtrMem & 31) == 0)
10218 {
10219 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10220 pu256Dst->au64[0] = pu256Value->au64[0];
10221 pu256Dst->au64[1] = pu256Value->au64[1];
10222 pu256Dst->au64[2] = pu256Value->au64[2];
10223 pu256Dst->au64[3] = pu256Value->au64[3];
10224 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10225 return;
10226 }
10227
10228 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10229 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10230}
10231#endif
10232
10233
10234/**
10235 * Stores a descriptor register (sgdt, sidt).
10236 *
10237 * @returns Strict VBox status code.
10238 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10239 * @param cbLimit The limit.
10240 * @param GCPtrBase The base address.
10241 * @param iSegReg The index of the segment register to use for
10242 * this access. The base and limits are checked.
10243 * @param GCPtrMem The address of the guest memory.
10244 */
10245IEM_STATIC VBOXSTRICTRC
10246iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10247{
10248 /*
10249 * The SIDT and SGDT instructions actually store the data using two
10250 * independent writes. The instructions do not respond to opsize prefixes.
10251 */
10252 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10253 if (rcStrict == VINF_SUCCESS)
10254 {
10255 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10256 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10257 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10258 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10259 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10260 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10261 else
10262 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10263 }
10264 return rcStrict;
10265}
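
/*
 * Editor's note - worked example of the store above, using made-up register
 * values: SGDT with GDTR.base = 0x00123456 and GDTR.limit = 0x0027 executed
 * in 16-bit code writes the limit word first and the base dword second; only
 * the 286-or-older target CPUs force the top byte of the base to 0xFF.
 *
 * @code
 *     // Bytes stored at the destination (little endian):
 *     //   target CPU >  286:  27 00 56 34 12 00
 *     //   target CPU <= 286:  27 00 56 34 12 FF
 * @endcode
 */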
10266
10267
10268/**
10269 * Pushes a word onto the stack.
10270 *
10271 * @returns Strict VBox status code.
10272 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10273 * @param u16Value The value to push.
10274 */
10275IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value)
10276{
10277 /* Decrement the stack pointer. */
10278 uint64_t uNewRsp;
10279 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10280
10281 /* Write the word the lazy way. */
10282 uint16_t *pu16Dst;
10283 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10284 if (rc == VINF_SUCCESS)
10285 {
10286 *pu16Dst = u16Value;
10287 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10288 }
10289
10290 /* Commit the new RSP value unless an access handler made trouble. */
10291 if (rc == VINF_SUCCESS)
10292 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10293
10294 return rc;
10295}
10296
10297
10298/**
10299 * Pushes a dword onto the stack.
10300 *
10301 * @returns Strict VBox status code.
10302 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10303 * @param u32Value The value to push.
10304 */
10305IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value)
10306{
10307 /* Decrement the stack pointer. */
10308 uint64_t uNewRsp;
10309 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10310
10311 /* Write the dword the lazy way. */
10312 uint32_t *pu32Dst;
10313 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10314 if (rc == VINF_SUCCESS)
10315 {
10316 *pu32Dst = u32Value;
10317 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10318 }
10319
10320 /* Commit the new RSP value unless an access handler made trouble. */
10321 if (rc == VINF_SUCCESS)
10322 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10323
10324 return rc;
10325}
10326
10327
10328/**
10329 * Pushes a dword segment register value onto the stack.
10330 *
10331 * @returns Strict VBox status code.
10332 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10333 * @param u32Value The value to push.
10334 */
10335IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value)
10336{
10337 /* Decrement the stack pointer. */
10338 uint64_t uNewRsp;
10339 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10340
10341 /* The Intel docs talk about zero extending the selector register
10342 value. My actual intel CPU here might be zero extending the value
10343 but it still only writes the lower word... */
10344 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10345 * happens when crossing an electric page boundary: is the high word checked
10346 * for write accessibility or not? Probably it is. What about segment limits?
10347 * It appears this behavior is also shared with trap error codes.
10348 *
10349 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10350 * ancient hardware when it actually did change. */
10351 uint16_t *pu16Dst;
10352 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10353 if (rc == VINF_SUCCESS)
10354 {
10355 *pu16Dst = (uint16_t)u32Value;
10356 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10357 }
10358
10359 /* Commit the new RSP value unless an access handler made trouble. */
10360 if (rc == VINF_SUCCESS)
10361 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10362
10363 return rc;
10364}
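
/*
 * Editor's note - worked example of the emulated behaviour described above,
 * with made-up values: a 32-bit operand size segment register push moves the
 * stack pointer by four bytes but only the low word of the slot is written,
 * so the high word keeps whatever was there before.
 *
 * @code
 *     // Before:  ESP = 0x1000, dword at 0x0FFC = 0xDEADBEEF
 *     // PUSH ES  (ES = 0x0023, 32-bit operand size)
 *     // After:   ESP = 0x0FFC, dword at 0x0FFC = 0xDEAD0023
 * @endcode
 */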
10365
10366
10367/**
10368 * Pushes a qword onto the stack.
10369 *
10370 * @returns Strict VBox status code.
10371 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10372 * @param u64Value The value to push.
10373 */
10374IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value)
10375{
10376 /* Decrement the stack pointer. */
10377 uint64_t uNewRsp;
10378 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10379
10380 /* Write the qword the lazy way. */
10381 uint64_t *pu64Dst;
10382 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10383 if (rc == VINF_SUCCESS)
10384 {
10385 *pu64Dst = u64Value;
10386 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10387 }
10388
10389 /* Commit the new RSP value unless an access handler made trouble. */
10390 if (rc == VINF_SUCCESS)
10391 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10392
10393 return rc;
10394}
10395
10396
10397/**
10398 * Pops a word from the stack.
10399 *
10400 * @returns Strict VBox status code.
10401 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10402 * @param pu16Value Where to store the popped value.
10403 */
10404IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value)
10405{
10406 /* Increment the stack pointer. */
10407 uint64_t uNewRsp;
10408 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10409
10410 /* Fetch the word the lazy way. */
10411 uint16_t const *pu16Src;
10412 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10413 if (rc == VINF_SUCCESS)
10414 {
10415 *pu16Value = *pu16Src;
10416 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10417
10418 /* Commit the new RSP value. */
10419 if (rc == VINF_SUCCESS)
10420 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10421 }
10422
10423 return rc;
10424}
10425
10426
10427/**
10428 * Pops a dword from the stack.
10429 *
10430 * @returns Strict VBox status code.
10431 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10432 * @param pu32Value Where to store the popped value.
10433 */
10434IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value)
10435{
10436 /* Increment the stack pointer. */
10437 uint64_t uNewRsp;
10438 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10439
10440 /* Fetch the dword the lazy way. */
10441 uint32_t const *pu32Src;
10442 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10443 if (rc == VINF_SUCCESS)
10444 {
10445 *pu32Value = *pu32Src;
10446 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10447
10448 /* Commit the new RSP value. */
10449 if (rc == VINF_SUCCESS)
10450 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10451 }
10452
10453 return rc;
10454}
10455
10456
10457/**
10458 * Pops a qword from the stack.
10459 *
10460 * @returns Strict VBox status code.
10461 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10462 * @param pu64Value Where to store the popped value.
10463 */
10464IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value)
10465{
10466 /* Increment the stack pointer. */
10467 uint64_t uNewRsp;
10468 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10469
10470 /* Fetch the qword the lazy way. */
10471 uint64_t const *pu64Src;
10472 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10473 if (rc == VINF_SUCCESS)
10474 {
10475 *pu64Value = *pu64Src;
10476 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10477
10478 /* Commit the new RSP value. */
10479 if (rc == VINF_SUCCESS)
10480 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10481 }
10482
10483 return rc;
10484}
10485
10486
10487/**
10488 * Pushes a word onto the stack, using a temporary stack pointer.
10489 *
10490 * @returns Strict VBox status code.
10491 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10492 * @param u16Value The value to push.
10493 * @param pTmpRsp Pointer to the temporary stack pointer.
10494 */
10495IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10496{
10497 /* Decrement the stack pointer. */
10498 RTUINT64U NewRsp = *pTmpRsp;
10499 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10500
10501 /* Write the word the lazy way. */
10502 uint16_t *pu16Dst;
10503 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10504 if (rc == VINF_SUCCESS)
10505 {
10506 *pu16Dst = u16Value;
10507 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10508 }
10509
10510 /* Commit the new RSP value unless an access handler made trouble. */
10511 if (rc == VINF_SUCCESS)
10512 *pTmpRsp = NewRsp;
10513
10514 return rc;
10515}
10516
10517
10518/**
10519 * Pushes a dword onto the stack, using a temporary stack pointer.
10520 *
10521 * @returns Strict VBox status code.
10522 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10523 * @param u32Value The value to push.
10524 * @param pTmpRsp Pointer to the temporary stack pointer.
10525 */
10526IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10527{
10528 /* Decrement the stack pointer. */
10529 RTUINT64U NewRsp = *pTmpRsp;
10530 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10531
10532 /* Write the dword the lazy way. */
10533 uint32_t *pu32Dst;
10534 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10535 if (rc == VINF_SUCCESS)
10536 {
10537 *pu32Dst = u32Value;
10538 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10539 }
10540
10541 /* Commit the new RSP value unless an access handler made trouble. */
10542 if (rc == VINF_SUCCESS)
10543 *pTmpRsp = NewRsp;
10544
10545 return rc;
10546}
10547
10548
10549/**
10550 * Pushes a qword onto the stack, using a temporary stack pointer.
10551 *
10552 * @returns Strict VBox status code.
10553 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10554 * @param u64Value The value to push.
10555 * @param pTmpRsp Pointer to the temporary stack pointer.
10556 */
10557IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10558{
10559 /* Decrement the stack pointer. */
10560 RTUINT64U NewRsp = *pTmpRsp;
10561 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10562
10563 /* Write the qword the lazy way. */
10564 uint64_t *pu64Dst;
10565 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10566 if (rc == VINF_SUCCESS)
10567 {
10568 *pu64Dst = u64Value;
10569 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10570 }
10571
10572 /* Commit the new RSP value unless an access handler made trouble. */
10573 if (rc == VINF_SUCCESS)
10574 *pTmpRsp = NewRsp;
10575
10576 return rc;
10577}
10578
10579
10580/**
10581 * Pops a word from the stack, using a temporary stack pointer.
10582 *
10583 * @returns Strict VBox status code.
10584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10585 * @param pu16Value Where to store the popped value.
10586 * @param pTmpRsp Pointer to the temporary stack pointer.
10587 */
10588IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10589{
10590 /* Increment the stack pointer. */
10591 RTUINT64U NewRsp = *pTmpRsp;
10592 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10593
10594 /* Fetch the word the lazy way. */
10595 uint16_t const *pu16Src;
10596 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10597 if (rc == VINF_SUCCESS)
10598 {
10599 *pu16Value = *pu16Src;
10600 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10601
10602 /* Commit the new RSP value. */
10603 if (rc == VINF_SUCCESS)
10604 *pTmpRsp = NewRsp;
10605 }
10606
10607 return rc;
10608}
10609
10610
10611/**
10612 * Pops a dword from the stack, using a temporary stack pointer.
10613 *
10614 * @returns Strict VBox status code.
10615 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10616 * @param pu32Value Where to store the popped value.
10617 * @param pTmpRsp Pointer to the temporary stack pointer.
10618 */
10619IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10620{
10621 /* Increment the stack pointer. */
10622 RTUINT64U NewRsp = *pTmpRsp;
10623 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10624
10625 /* Fetch the dword the lazy way. */
10626 uint32_t const *pu32Src;
10627 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10628 if (rc == VINF_SUCCESS)
10629 {
10630 *pu32Value = *pu32Src;
10631 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10632
10633 /* Commit the new RSP value. */
10634 if (rc == VINF_SUCCESS)
10635 *pTmpRsp = NewRsp;
10636 }
10637
10638 return rc;
10639}
10640
10641
10642/**
10643 * Pops a qword from the stack, using a temporary stack pointer.
10644 *
10645 * @returns Strict VBox status code.
10646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10647 * @param pu64Value Where to store the popped value.
10648 * @param pTmpRsp Pointer to the temporary stack pointer.
10649 */
10650IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10651{
10652 /* Increment the stack pointer. */
10653 RTUINT64U NewRsp = *pTmpRsp;
10654 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10655
10656 /* Fetch the qword the lazy way. */
10657 uint64_t const *pu64Src;
10658 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10659 if (rcStrict == VINF_SUCCESS)
10660 {
10661 *pu64Value = *pu64Src;
10662 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10663
10664 /* Commit the new RSP value. */
10665 if (rcStrict == VINF_SUCCESS)
10666 *pTmpRsp = NewRsp;
10667 }
10668
10669 return rcStrict;
10670}
10671
10672
10673/**
10674 * Begin a special stack push (used by interrupt, exceptions and such).
10675 *
10676 * This will raise \#SS or \#PF if appropriate.
10677 *
10678 * @returns Strict VBox status code.
10679 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10680 * @param cbMem The number of bytes to push onto the stack.
10681 * @param ppvMem Where to return the pointer to the stack memory.
10682 * As with the other memory functions this could be
10683 * direct access or bounce buffered access, so
10684 * don't commit register changes until the commit call
10685 * succeeds.
10686 * @param puNewRsp Where to return the new RSP value. This must be
10687 * passed unchanged to
10688 * iemMemStackPushCommitSpecial().
10689 */
10690IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10691{
10692 Assert(cbMem < UINT8_MAX);
10693 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10694 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10695}
10696
10697
10698/**
10699 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10700 *
10701 * This will update the rSP.
10702 *
10703 * @returns Strict VBox status code.
10704 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10705 * @param pvMem The pointer returned by
10706 * iemMemStackPushBeginSpecial().
10707 * @param uNewRsp The new RSP value returned by
10708 * iemMemStackPushBeginSpecial().
10709 */
10710IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp)
10711{
10712 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10713 if (rcStrict == VINF_SUCCESS)
10714 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10715 return rcStrict;
10716}
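/*
 * Illustrative sketch, not part of the original sources: how the special
 * stack push pair above is typically used.  The 8-byte frame and the local
 * variable names are made up for this comment.
 *
 *     uint64_t    *pu64Frame;  // direct or bounce buffered mapping
 *     uint64_t     uNewRsp;    // must be passed unchanged to the commit call
 *     VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 8, (void **)&pu64Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;     // the SS/PF status has already been raised
 *     *pu64Frame = uValue;     // fill in the frame before committing
 *     return iemMemStackPushCommitSpecial(pVCpu, pu64Frame, uNewRsp); // updates RSP on success
 */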
10717
10718
10719/**
10720 * Begin a special stack pop (used by iret, retf and such).
10721 *
10722 * This will raise \#SS or \#PF if appropriate.
10723 *
10724 * @returns Strict VBox status code.
10725 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10726 * @param cbMem The number of bytes to pop from the stack.
10727 * @param ppvMem Where to return the pointer to the stack memory.
10728 * @param puNewRsp Where to return the new RSP value. This must be
10729 * assigned to CPUMCTX::rsp manually some time
10730 * after iemMemStackPopDoneSpecial() has been
10731 * called.
10732 */
10733IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10734{
10735 Assert(cbMem < UINT8_MAX);
10736 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10737 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10738}
10739
10740
10741/**
10742 * Continue a special stack pop (used by iret and retf).
10743 *
10744 * This will raise \#SS or \#PF if appropriate.
10745 *
10746 * @returns Strict VBox status code.
10747 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10748 * @param cbMem The number of bytes to pop from the stack.
10749 * @param ppvMem Where to return the pointer to the stack memory.
10750 * @param puNewRsp Where to return the new RSP value. This must be
10751 * assigned to CPUMCTX::rsp manually some time
10752 * after iemMemStackPopDoneSpecial() has been
10753 * called.
10754 */
10755IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10756{
10757 Assert(cbMem < UINT8_MAX);
10758 RTUINT64U NewRsp;
10759 NewRsp.u = *puNewRsp;
10760 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10761 *puNewRsp = NewRsp.u;
10762 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10763}
10764
10765
10766/**
10767 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10768 * iemMemStackPopContinueSpecial).
10769 *
10770 * The caller will manually commit the rSP.
10771 *
10772 * @returns Strict VBox status code.
10773 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10774 * @param pvMem The pointer returned by
10775 * iemMemStackPopBeginSpecial() or
10776 * iemMemStackPopContinueSpecial().
10777 */
10778IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem)
10779{
10780 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10781}
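/*
 * Illustrative sketch, not part of the original sources: the special stack
 * pop protocol used by iret/retf style code.  The 8-byte frame and the
 * variable names are made up for this comment.
 *
 *     uint64_t const *pu64Frame;
 *     uint64_t        uNewRsp;
 *     VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 8, (void const **)&pu64Frame, &uNewRsp);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;
 *     uint64_t const uValue = *pu64Frame;                      // read while the frame is mapped
 *     rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu64Frame);  // commit/unmap the read access
 *     if (rcStrict == VINF_SUCCESS)
 *         pVCpu->cpum.GstCtx.rsp = uNewRsp;                    // the caller commits RSP manually
 *     return rcStrict;
 */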
10782
10783
10784/**
10785 * Fetches a system table byte.
10786 *
10787 * @returns Strict VBox status code.
10788 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10789 * @param pbDst Where to return the byte.
10790 * @param iSegReg The index of the segment register to use for
10791 * this access. The base and limits are checked.
10792 * @param GCPtrMem The address of the guest memory.
10793 */
10794IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10795{
10796 /* The lazy approach for now... */
10797 uint8_t const *pbSrc;
10798 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10799 if (rc == VINF_SUCCESS)
10800 {
10801 *pbDst = *pbSrc;
10802 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10803 }
10804 return rc;
10805}
10806
10807
10808/**
10809 * Fetches a system table word.
10810 *
10811 * @returns Strict VBox status code.
10812 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10813 * @param pu16Dst Where to return the word.
10814 * @param iSegReg The index of the segment register to use for
10815 * this access. The base and limits are checked.
10816 * @param GCPtrMem The address of the guest memory.
10817 */
10818IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10819{
10820 /* The lazy approach for now... */
10821 uint16_t const *pu16Src;
10822 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10823 if (rc == VINF_SUCCESS)
10824 {
10825 *pu16Dst = *pu16Src;
10826 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10827 }
10828 return rc;
10829}
10830
10831
10832/**
10833 * Fetches a system table dword.
10834 *
10835 * @returns Strict VBox status code.
10836 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10837 * @param pu32Dst Where to return the dword.
10838 * @param iSegReg The index of the segment register to use for
10839 * this access. The base and limits are checked.
10840 * @param GCPtrMem The address of the guest memory.
10841 */
10842IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10843{
10844 /* The lazy approach for now... */
10845 uint32_t const *pu32Src;
10846 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10847 if (rc == VINF_SUCCESS)
10848 {
10849 *pu32Dst = *pu32Src;
10850 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10851 }
10852 return rc;
10853}
10854
10855
10856/**
10857 * Fetches a system table qword.
10858 *
10859 * @returns Strict VBox status code.
10860 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10861 * @param pu64Dst Where to return the qword.
10862 * @param iSegReg The index of the segment register to use for
10863 * this access. The base and limits are checked.
10864 * @param GCPtrMem The address of the guest memory.
10865 */
10866IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10867{
10868 /* The lazy approach for now... */
10869 uint64_t const *pu64Src;
10870 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10871 if (rc == VINF_SUCCESS)
10872 {
10873 *pu64Dst = *pu64Src;
10874 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10875 }
10876 return rc;
10877}
10878
10879
10880/**
10881 * Fetches a descriptor table entry with caller specified error code.
10882 *
10883 * @returns Strict VBox status code.
10884 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10885 * @param pDesc Where to return the descriptor table entry.
10886 * @param uSel The selector which table entry to fetch.
10887 * @param uXcpt The exception to raise on table lookup error.
10888 * @param uErrorCode The error code associated with the exception.
10889 */
10890IEM_STATIC VBOXSTRICTRC
10891iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10892{
10893 AssertPtr(pDesc);
10894 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10895
10896 /** @todo did the 286 require all 8 bytes to be accessible? */
10897 /*
10898 * Get the selector table base and check bounds.
10899 */
10900 RTGCPTR GCPtrBase;
10901 if (uSel & X86_SEL_LDT)
10902 {
10903 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10904 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10905 {
10906 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10907 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10908 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10909 uErrorCode, 0);
10910 }
10911
10912 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10913 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10914 }
10915 else
10916 {
10917 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10918 {
10919 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10920 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10921 uErrorCode, 0);
10922 }
10923 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10924 }
10925
10926 /*
10927 * Read the legacy descriptor and maybe the long mode extensions if
10928 * required.
10929 */
10930 VBOXSTRICTRC rcStrict;
10931 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10932 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10933 else
10934 {
10935 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10936 if (rcStrict == VINF_SUCCESS)
10937 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10938 if (rcStrict == VINF_SUCCESS)
10939 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10940 if (rcStrict == VINF_SUCCESS)
10941 pDesc->Legacy.au16[3] = 0;
10942 else
10943 return rcStrict;
10944 }
10945
10946 if (rcStrict == VINF_SUCCESS)
10947 {
10948 if ( !IEM_IS_LONG_MODE(pVCpu)
10949 || pDesc->Legacy.Gen.u1DescType)
10950 pDesc->Long.au64[1] = 0;
10951 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10952 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10953 else
10954 {
10955 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10956 /** @todo is this the right exception? */
10957 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10958 }
10959 }
10960 return rcStrict;
10961}
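/*
 * Worked example for the bounds and address calculation above; the selector
 * value is made up for this comment.  For uSel=0x002b (TI=0 so GDT, RPL=3)
 * the descriptor offset is uSel & X86_SEL_MASK = 0x28 and the last byte
 * touched is at uSel | X86_SEL_RPL_LDT = 0x2f, so gdtr.cbGdt must be at
 * least 0x2f; the 8-byte legacy entry is then read from gdtr.pGdt + 0x28.
 */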
10962
10963
10964/**
10965 * Fetches a descriptor table entry.
10966 *
10967 * @returns Strict VBox status code.
10968 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10969 * @param pDesc Where to return the descriptor table entry.
10970 * @param uSel The selector which table entry to fetch.
10971 * @param uXcpt The exception to raise on table lookup error.
10972 */
10973IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10974{
10975 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10976}
10977
10978
10979/**
10980 * Fakes a long mode stack segment descriptor for SS = 0.
10981 *
10982 * @param pDescSs Where to return the fake stack descriptor.
10983 * @param uDpl The DPL we want.
10984 */
10985IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
10986{
10987 pDescSs->Long.au64[0] = 0;
10988 pDescSs->Long.au64[1] = 0;
10989 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
10990 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
10991 pDescSs->Long.Gen.u2Dpl = uDpl;
10992 pDescSs->Long.Gen.u1Present = 1;
10993 pDescSs->Long.Gen.u1Long = 1;
10994}
10995
10996
10997/**
10998 * Marks the selector descriptor as accessed (only non-system descriptors).
10999 *
11000 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11001 * will therefore skip the limit checks.
11002 *
11003 * @returns Strict VBox status code.
11004 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11005 * @param uSel The selector.
11006 */
11007IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel)
11008{
11009 /*
11010 * Get the selector table base and calculate the entry address.
11011 */
11012 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11013 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11014 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11015 GCPtr += uSel & X86_SEL_MASK;
11016
11017 /*
11018 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11019 * ugly stuff to avoid this. This will make sure it's an atomic access
11020 * as well as more or less remove any question about 8-bit or 32-bit accesses.
11021 */
11022 VBOXSTRICTRC rcStrict;
11023 uint32_t volatile *pu32;
11024 if ((GCPtr & 3) == 0)
11025 {
11026 /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11027 GCPtr += 2 + 2;
11028 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11029 if (rcStrict != VINF_SUCCESS)
11030 return rcStrict;
11031 ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11032 }
11033 else
11034 {
11035 /* The misaligned GDT/LDT case, map the whole thing. */
11036 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11037 if (rcStrict != VINF_SUCCESS)
11038 return rcStrict;
11039 switch ((uintptr_t)pu32 & 3)
11040 {
11041 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11042 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11043 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11044 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11045 }
11046 }
11047
11048 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11049}
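/*
 * Bit position sanity check for the code above (illustrative comment only):
 * X86_SEL_TYPE_ACCESSED is bit 0 of the type byte, i.e. descriptor bit 40
 * (byte 5, bit 0).  In the aligned case the mapping starts at byte 4, so the
 * bit lands at 40 - 32 = 8 within the mapped dword.  In the misaligned case
 * the byte offset added to pu32 and the bit index always sum back to bit 40,
 * e.g. for alignment 1: 3 bytes * 8 + (40 - 24) = 40.
 */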
11050
11051/** @} */
11052
11053
11054/*
11055 * Include the C/C++ implementation of instructions.
11056 */
11057#include "IEMAllCImpl.cpp.h"
11058
11059
11060
11061/** @name "Microcode" macros.
11062 *
11063 * The idea is that the same instruction implementation code should be usable
11064 * both by the interpreter and by a future recompiler. Thus this obfuscation.
11065 *
11066 * @{
11067 */
11068#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11069#define IEM_MC_END() }
11070#define IEM_MC_PAUSE() do {} while (0)
11071#define IEM_MC_CONTINUE() do {} while (0)
11072
11073/** Internal macro. */
11074#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11075 do \
11076 { \
11077 VBOXSTRICTRC rcStrict2 = a_Expr; \
11078 if (rcStrict2 != VINF_SUCCESS) \
11079 return rcStrict2; \
11080 } while (0)
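/*
 * Illustrative sketch, not taken from the opcode tables: a minimal
 * instruction body written with the IEM_MC_* macros defined in this section
 * (register indices picked arbitrarily for this comment).  It copies the
 * 16-bit SI value into DI and advances RIP:
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint16_t, u16Value);
 *     IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xSI);
 *     IEM_MC_STORE_GREG_U16(X86_GREG_xDI, u16Value);
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */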
11081
11082
11083#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11084#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11085#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11086#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11087#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11088#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11089#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11090#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11091#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11092 do { \
11093 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11094 return iemRaiseDeviceNotAvailable(pVCpu); \
11095 } while (0)
11096#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11097 do { \
11098 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11099 return iemRaiseDeviceNotAvailable(pVCpu); \
11100 } while (0)
11101#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11102 do { \
11103 if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
11104 return iemRaiseMathFault(pVCpu); \
11105 } while (0)
11106#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11107 do { \
11108 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11109 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11110 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11111 return iemRaiseUndefinedOpcode(pVCpu); \
11112 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11113 return iemRaiseDeviceNotAvailable(pVCpu); \
11114 } while (0)
11115#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11116 do { \
11117 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11118 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11119 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11120 return iemRaiseUndefinedOpcode(pVCpu); \
11121 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11122 return iemRaiseDeviceNotAvailable(pVCpu); \
11123 } while (0)
11124#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11125 do { \
11126 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11127 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11128 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11129 return iemRaiseUndefinedOpcode(pVCpu); \
11130 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11131 return iemRaiseDeviceNotAvailable(pVCpu); \
11132 } while (0)
11133#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11134 do { \
11135 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11136 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11137 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11138 return iemRaiseUndefinedOpcode(pVCpu); \
11139 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11140 return iemRaiseDeviceNotAvailable(pVCpu); \
11141 } while (0)
11142#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11143 do { \
11144 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11145 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11146 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11147 return iemRaiseUndefinedOpcode(pVCpu); \
11148 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11149 return iemRaiseDeviceNotAvailable(pVCpu); \
11150 } while (0)
11151#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11152 do { \
11153 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11154 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11155 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11156 return iemRaiseUndefinedOpcode(pVCpu); \
11157 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11158 return iemRaiseDeviceNotAvailable(pVCpu); \
11159 } while (0)
11160#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11161 do { \
11162 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11163 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11164 return iemRaiseUndefinedOpcode(pVCpu); \
11165 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11166 return iemRaiseDeviceNotAvailable(pVCpu); \
11167 } while (0)
11168#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11169 do { \
11170 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11171 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11172 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11173 return iemRaiseUndefinedOpcode(pVCpu); \
11174 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11175 return iemRaiseDeviceNotAvailable(pVCpu); \
11176 } while (0)
11177#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11178 do { \
11179 if (pVCpu->iem.s.uCpl != 0) \
11180 return iemRaiseGeneralProtectionFault0(pVCpu); \
11181 } while (0)
11182#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11183 do { \
11184 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11185 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11186 } while (0)
11187#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11188 do { \
11189 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11190 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11191 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11192 return iemRaiseUndefinedOpcode(pVCpu); \
11193 } while (0)
11194#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11195 do { \
11196 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11197 return iemRaiseGeneralProtectionFault0(pVCpu); \
11198 } while (0)
11199
11200
11201#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11202#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11203#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11204#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11205#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11206#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11207#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11208 uint32_t a_Name; \
11209 uint32_t *a_pName = &a_Name
11210#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11211 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11212
11213#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11214#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11215
11216#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11217#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11218#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11219#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11220#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11221#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11222#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11223#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11224#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11225#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11226#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11227#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11228#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11229#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11230#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11231#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11232#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11233#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11234 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11235 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11236 } while (0)
11237#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11238 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11239 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11240 } while (0)
11241#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11242 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11243 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11244 } while (0)
11245/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11246#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11247 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11248 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11249 } while (0)
11250#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11251 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11252 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11253 } while (0)
11254/** @note Not for IOPL or IF testing or modification. */
11255#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11256#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11257#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
11258#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW
11259
11260#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11261#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11262#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11263#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11264#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11265#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11266#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11267#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11268#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11269#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11270/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11271#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11272 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11273 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11274 } while (0)
11275#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11276 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11277 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11278 } while (0)
11279#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11280 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11281
11282
11283#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11284#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11285/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11286 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11287#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11288#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
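/*
 * Illustrative sketch for the IEM_MC_REF_GREG_U32 note above (register and
 * operation made up for this comment): after a 32-bit modification through
 * the reference, the caller must zero bits 63:32 itself, e.g.:
 *
 *     uint32_t *pu32Dst;
 *     IEM_MC_REF_GREG_U32(pu32Dst, X86_GREG_xCX);
 *     *pu32Dst ^= UINT32_C(0x80000000);             // some 32-bit update
 *     IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(pu32Dst);   // 32-bit writes clear RCX[63:32]
 */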
11289/** @note Not for IOPL or IF testing or modification. */
11290#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11291
11292#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11293#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11294#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11295 do { \
11296 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11297 *pu32Reg += (a_u32Value); \
11298 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11299 } while (0)
11300#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11301
11302#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11303#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11304#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11305 do { \
11306 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11307 *pu32Reg -= (a_u32Value); \
11308 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11309 } while (0)
11310#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11311#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11312
11313#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11314#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11315#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11316#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11317#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11318#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11319#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11320
11321#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11322#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11323#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11324#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11325
11326#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11327#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11328#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11329
11330#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11331#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11332#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11333
11334#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11335#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11336#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11337
11338#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11339#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11340#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11341
11342#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11343
11344#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11345
11346#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11347#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11348#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11349 do { \
11350 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11351 *pu32Reg &= (a_u32Value); \
11352 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11353 } while (0)
11354#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11355
11356#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11357#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11358#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11359 do { \
11360 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11361 *pu32Reg |= (a_u32Value); \
11362 pu32Reg[1] = 0; /* implicitly clear the high bits. */ \
11363 } while (0)
11364#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11365
11366
11367/** @note Not for IOPL or IF modification. */
11368#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11369/** @note Not for IOPL or IF modification. */
11370#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11371/** @note Not for IOPL or IF modification. */
11372#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11373
11374#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11375
11376/** Switches the FPU state to MMX mode (FSW.TOP=0, all tags valid, i.e. abridged FTW=0xff) if necessary. */
11377#define IEM_MC_FPU_TO_MMX_MODE() do { \
11378 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
11379 pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
11380 } while (0)
11381
11382/** Switches the FPU state back out of MMX mode (all tags empty, i.e. abridged FTW=0). */
11383#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11384 pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
11385 } while (0)
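/*
 * Illustrative sketch, not taken from the opcode tables: MMX instruction
 * bodies typically combine the mode switch above with the MREG accessors
 * below (register indices picked arbitrarily for this comment):
 *
 *     IEM_MC_BEGIN(0, 1);
 *     IEM_MC_LOCAL(uint64_t, u64Tmp);
 *     IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT();
 *     IEM_MC_FETCH_GREG_U64(u64Tmp, X86_GREG_xAX);
 *     IEM_MC_STORE_MREG_U64(0, u64Tmp);   // mm0
 *     IEM_MC_FPU_TO_MMX_MODE();
 *     IEM_MC_ADVANCE_RIP();
 *     IEM_MC_END();
 */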
11386
11387#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11388 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
11389#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11390 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11391#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11392 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11393 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11394 } while (0)
11395#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11396 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11397 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11398 } while (0)
11399#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11400 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11401#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11402 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11403#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11404 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11405
11406#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11407 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
11408 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
11409 } while (0)
11410#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11411 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11412#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11413 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11414#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11415 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11416#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11417 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11418 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11419 } while (0)
11420#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11421 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11422#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11423 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11424 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
11425 } while (0)
11426#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11427 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11428#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11429 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11430 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
11431 } while (0)
11432#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11433 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11434#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11435 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
11436#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11437 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
11438#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11439 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
11440#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11441 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
11442 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
11443 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
11444 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
11445 } while (0)
11446
11447#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11448 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11449 (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
11450 } while (0)
11451#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11452 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11453 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11454 } while (0)
11455#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11456 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11457 (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11458 (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11459 } while (0)
11460#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11461 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11462 (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11463 (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11464 (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11465 (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11466 } while (0)
11467
11468#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11469#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11470 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11471 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11472 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11473 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11474 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11475 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11476 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11477 } while (0)
11478#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11479 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11480 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11481 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11482 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11483 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11484 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11485 } while (0)
11486#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11487 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11488 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11489 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11490 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11491 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11492 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11493 } while (0)
11494#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11495 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11496 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11497 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11498 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11499 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11500 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11501 } while (0)
11502
11503#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11504 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
11505#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11506 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].uXmm)
11507#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11508 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aYMM[(a_iYReg)].au64[0])
11509#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11510 do { uintptr_t const iYRegTmp = (a_iYReg); \
11511 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11512 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11513 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
11514 } while (0)
11515
11516#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11517 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11518 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11519 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11520 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11521 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11522 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11523 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11524 } while (0)
11525#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11526 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11527 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11528 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11529 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11530 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11531 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11532 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11533 } while (0)
11534#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11535 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11536 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11537 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11538 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11539 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11540 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11541 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11542 } while (0)
11543
11544#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11545 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11546 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11547 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11548 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11549 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11550 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11551 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11552 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11553 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11554 } while (0)
11555#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11556 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11557 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11558 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11559 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11560 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11561 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11562 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11563 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11564 } while (0)
11565#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11566 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11567 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11568 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11569 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11570 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11571 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11572 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11573 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11574 } while (0)
11575#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11576 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11577 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11578 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11579 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11580 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11581 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11582 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11583 } while (0)
11584
11585#ifndef IEM_WITH_SETJMP
11586# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11587 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11588# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11589 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11590# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11591 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11592#else
11593# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11594 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11595# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11596 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11597# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11598 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11599#endif
11600
11601#ifndef IEM_WITH_SETJMP
11602# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11603 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11604# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11605 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11606# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11607 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11608#else
11609# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11610 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11611# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11612 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11613# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11614 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11615#endif
11616
11617#ifndef IEM_WITH_SETJMP
11618# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11619 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11620# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11621 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11622# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11623 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11624#else
11625# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11626 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11627# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11628 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11629# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11630 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11631#endif
11632
11633#ifdef SOME_UNUSED_FUNCTION
11634# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11635 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11636#endif
11637
11638#ifndef IEM_WITH_SETJMP
11639# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11640 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11641# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11642 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11643# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11644 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11645# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11646 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11647#else
11648# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11649 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11650# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11651 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11652# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11653 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11654# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11655 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11656#endif
11657
11658#ifndef IEM_WITH_SETJMP
11659# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11660 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11661# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11662 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11663# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11664 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11665#else
11666# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11667 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11668# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11669 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11670# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11671 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11672#endif
11673
11674#ifndef IEM_WITH_SETJMP
11675# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11676 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11677# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11678 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11679#else
11680# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11681 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11682# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11683 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11684#endif
11685
11686#ifndef IEM_WITH_SETJMP
11687# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11688 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11689# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11690 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11691#else
11692# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11693 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11694# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11695 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11696#endif
11697
11698
11699
11700#ifndef IEM_WITH_SETJMP
11701# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11702 do { \
11703 uint8_t u8Tmp; \
11704 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11705 (a_u16Dst) = u8Tmp; \
11706 } while (0)
11707# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11708 do { \
11709 uint8_t u8Tmp; \
11710 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11711 (a_u32Dst) = u8Tmp; \
11712 } while (0)
11713# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11714 do { \
11715 uint8_t u8Tmp; \
11716 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11717 (a_u64Dst) = u8Tmp; \
11718 } while (0)
11719# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11720 do { \
11721 uint16_t u16Tmp; \
11722 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11723 (a_u32Dst) = u16Tmp; \
11724 } while (0)
11725# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11726 do { \
11727 uint16_t u16Tmp; \
11728 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11729 (a_u64Dst) = u16Tmp; \
11730 } while (0)
11731# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11732 do { \
11733 uint32_t u32Tmp; \
11734 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11735 (a_u64Dst) = u32Tmp; \
11736 } while (0)
11737#else /* IEM_WITH_SETJMP */
11738# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11739 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11740# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11741 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11742# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11743 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11744# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11745 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11746# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11747 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11748# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11749 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11750#endif /* IEM_WITH_SETJMP */
11751
11752#ifndef IEM_WITH_SETJMP
11753# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11754 do { \
11755 uint8_t u8Tmp; \
11756 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11757 (a_u16Dst) = (int8_t)u8Tmp; \
11758 } while (0)
11759# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11760 do { \
11761 uint8_t u8Tmp; \
11762 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11763 (a_u32Dst) = (int8_t)u8Tmp; \
11764 } while (0)
11765# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11766 do { \
11767 uint8_t u8Tmp; \
11768 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11769 (a_u64Dst) = (int8_t)u8Tmp; \
11770 } while (0)
11771# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11772 do { \
11773 uint16_t u16Tmp; \
11774 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11775 (a_u32Dst) = (int16_t)u16Tmp; \
11776 } while (0)
11777# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11778 do { \
11779 uint16_t u16Tmp; \
11780 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11781 (a_u64Dst) = (int16_t)u16Tmp; \
11782 } while (0)
11783# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11784 do { \
11785 uint32_t u32Tmp; \
11786 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11787 (a_u64Dst) = (int32_t)u32Tmp; \
11788 } while (0)
11789#else /* IEM_WITH_SETJMP */
11790# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11791 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11792# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11793 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11794# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11795 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11796# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11797 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11798# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11799 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11800# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11801 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11802#endif /* IEM_WITH_SETJMP */
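/* Illustrative note (not from the original source): the ZX and SX fetch
 * variants differ only in how the temporary is widened.  For a byte value of
 * 0x80 read from guest memory, IEM_MC_FETCH_MEM_U8_ZX_U32 leaves the
 * destination as 0x00000080, while IEM_MC_FETCH_MEM_U8_SX_U32 casts through
 * int8_t and yields 0xFFFFFF80, which is exactly the MOVZX vs MOVSX
 * distinction the decoders rely on. */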
11803
11804#ifndef IEM_WITH_SETJMP
11805# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11806 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11807# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11808 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11809# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11810 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11811# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11812 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11813#else
11814# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11815 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11816# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11817 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11818# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11819 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11820# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11821 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11822#endif
11823
11824#ifndef IEM_WITH_SETJMP
11825# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11826 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11827# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11828 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11829# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11830 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11831# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11832 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11833#else
11834# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11835 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11836# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11837 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11838# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11839 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11840# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11841 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11842#endif
11843
11844#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11845#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11846#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11847#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11848#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11849#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11850#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11851 do { \
11852 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11853 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11854 } while (0)
11855
11856#ifndef IEM_WITH_SETJMP
11857# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11858 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11859# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11860 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11861#else
11862# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11863 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11864# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11865 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11866#endif
11867
11868#ifndef IEM_WITH_SETJMP
11869# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11870 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11871# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11872 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11873#else
11874# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11875 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11876# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11877 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11878#endif
11879
11880
11881#define IEM_MC_PUSH_U16(a_u16Value) \
11882 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11883#define IEM_MC_PUSH_U32(a_u32Value) \
11884 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11885#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11886 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11887#define IEM_MC_PUSH_U64(a_u64Value) \
11888 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11889
11890#define IEM_MC_POP_U16(a_pu16Value) \
11891 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11892#define IEM_MC_POP_U32(a_pu32Value) \
11893 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11894#define IEM_MC_POP_U64(a_pu64Value) \
11895 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11896
11897/** Maps guest memory for direct or bounce buffered access.
11898 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11899 * @remarks May return.
11900 */
11901#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11902 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11903
11904/** Maps guest memory for direct or bounce buffered access.
11905 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11906 * @remarks May return.
11907 */
11908#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11909 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11910
11911/** Commits the memory and unmaps the guest memory.
11912 * @remarks May return.
11913 */
11914#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11915 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
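/* Illustrative usage sketch (not from the original source): a read-modify-write
 * operand is typically mapped, modified and committed like this.  The variable
 * name and the IEM_ACCESS_DATA_RW flag follow the pattern used elsewhere in
 * IEM but should be treated as assumptions here, and real instruction bodies
 * hand the mapped pointer to a worker rather than touching it directly:
 *
 *     uint32_t *pu32Dst;
 *     IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEffDst, 0);
 *     *pu32Dst += 1;
 *     IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 */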
11916
11917/** Commits the memory and unmaps the guest memory unless the FPU status word
11918 * (@a a_u16FSW) and the FPU control word indicate a pending unmasked exception
11919 * that would prevent the store from taking place.
11920 *
11921 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11922 * store, while \#P will not.
11923 *
11924 * @remarks May in theory return - for now.
11925 */
11926#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11927 do { \
11928 if ( !(a_u16FSW & X86_FSW_ES) \
11929 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11930 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
11931 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11932 } while (0)
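/* Restated for clarity (illustrative, not from the original source): the store
 * is committed when FSW.ES is clear, or when every #U, #O and #I bit raised in
 * the passed FSW is masked by the FCW.  Worked example: FSW=0x0081 (ES+IE)
 * with FCW=0x037f (all exceptions masked) still commits, while the same FSW
 * with FCW=0x037e (IM clear) skips the commit and leaves the exception
 * pending. */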
11933
11934/** Calculate the effective address from R/M. */
11935#ifndef IEM_WITH_SETJMP
11936# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11937 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11938#else
11939# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11940 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11941#endif
11942
11943#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11944#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11945#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11946#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11947#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11948#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11949#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11950
11951/**
11952 * Defers the rest of the instruction emulation to a C implementation routine
11953 * and returns, only taking the standard parameters.
11954 *
11955 * @param a_pfnCImpl The pointer to the C routine.
11956 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11957 */
11958#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11959
11960/**
11961 * Defers the rest of instruction emulation to a C implementation routine and
11962 * returns, taking one argument in addition to the standard ones.
11963 *
11964 * @param a_pfnCImpl The pointer to the C routine.
11965 * @param a0 The argument.
11966 */
11967#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11968
11969/**
11970 * Defers the rest of the instruction emulation to a C implementation routine
11971 * and returns, taking two arguments in addition to the standard ones.
11972 *
11973 * @param a_pfnCImpl The pointer to the C routine.
11974 * @param a0 The first extra argument.
11975 * @param a1 The second extra argument.
11976 */
11977#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11978
11979/**
11980 * Defers the rest of the instruction emulation to a C implementation routine
11981 * and returns, taking three arguments in addition to the standard ones.
11982 *
11983 * @param a_pfnCImpl The pointer to the C routine.
11984 * @param a0 The first extra argument.
11985 * @param a1 The second extra argument.
11986 * @param a2 The third extra argument.
11987 */
11988#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
11989
11990/**
11991 * Defers the rest of the instruction emulation to a C implementation routine
11992 * and returns, taking four arguments in addition to the standard ones.
11993 *
11994 * @param a_pfnCImpl The pointer to the C routine.
11995 * @param a0 The first extra argument.
11996 * @param a1 The second extra argument.
11997 * @param a2 The third extra argument.
11998 * @param a3 The fourth extra argument.
11999 */
12000#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12001
12002/**
12003 * Defers the rest of the instruction emulation to a C implementation routine
12004 * and returns, taking five arguments in addition to the standard ones.
12005 *
12006 * @param a_pfnCImpl The pointer to the C routine.
12007 * @param a0 The first extra argument.
12008 * @param a1 The second extra argument.
12009 * @param a2 The third extra argument.
12010 * @param a3 The fourth extra argument.
12011 * @param a4 The fifth extra argument.
12012 */
12013#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
12014
12015/**
12016 * Defers the entire instruction emulation to a C implementation routine and
12017 * returns, only taking the standard parameters.
12018 *
12019 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12020 *
12021 * @param a_pfnCImpl The pointer to the C routine.
12022 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12023 */
12024#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12025
12026/**
12027 * Defers the entire instruction emulation to a C implementation routine and
12028 * returns, taking one argument in addition to the standard ones.
12029 *
12030 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12031 *
12032 * @param a_pfnCImpl The pointer to the C routine.
12033 * @param a0 The argument.
12034 */
12035#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12036
12037/**
12038 * Defers the entire instruction emulation to a C implementation routine and
12039 * returns, taking two arguments in addition to the standard ones.
12040 *
12041 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12042 *
12043 * @param a_pfnCImpl The pointer to the C routine.
12044 * @param a0 The first extra argument.
12045 * @param a1 The second extra argument.
12046 */
12047#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12048
12049/**
12050 * Defers the entire instruction emulation to a C implementation routine and
12051 * returns, taking three arguments in addition to the standard ones.
12052 *
12053 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12054 *
12055 * @param a_pfnCImpl The pointer to the C routine.
12056 * @param a0 The first extra argument.
12057 * @param a1 The second extra argument.
12058 * @param a2 The third extra argument.
12059 */
12060#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
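/* Illustrative usage sketch (hypothetical names, not from the original
 * source): a decoder that punts the whole instruction to a C worker uses the
 * DEFER family as its entire body,
 *
 *     FNIEMOP_DEF(iemOp_example)
 *     {
 *         IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX();
 *         return IEM_MC_DEFER_TO_CIMPL_0(iemCImpl_exampleWorker);
 *     }
 *
 * whereas the IEM_MC_CALL_CIMPL_N macros are used from inside an
 * IEM_MC_BEGIN/IEM_MC_END block once the operands have been decoded; note
 * that both forms end instruction emulation with a return, so nothing placed
 * after them in the block executes. */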
12061
12062/**
12063 * Calls a FPU assembly implementation taking one visible argument.
12064 *
12065 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12066 * @param a0 The first extra argument.
12067 */
12068#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12069 do { \
12070 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
12071 } while (0)
12072
12073/**
12074 * Calls a FPU assembly implementation taking two visible arguments.
12075 *
12076 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12077 * @param a0 The first extra argument.
12078 * @param a1 The second extra argument.
12079 */
12080#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12081 do { \
12082 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12083 } while (0)
12084
12085/**
12086 * Calls a FPU assembly implementation taking three visible arguments.
12087 *
12088 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12089 * @param a0 The first extra argument.
12090 * @param a1 The second extra argument.
12091 * @param a2 The third extra argument.
12092 */
12093#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12094 do { \
12095 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12096 } while (0)
12097
12098#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12099 do { \
12100 (a_FpuData).FSW = (a_FSW); \
12101 (a_FpuData).r80Result = *(a_pr80Value); \
12102 } while (0)
12103
12104/** Pushes FPU result onto the stack. */
12105#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12106 iemFpuPushResult(pVCpu, &a_FpuData)
12107/** Pushes FPU result onto the stack and sets the FPUDP. */
12108#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12109 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12110
12111/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12112#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12113 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12114
12115/** Stores FPU result in a stack register. */
12116#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12117 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12118/** Stores FPU result in a stack register and pops the stack. */
12119#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12120 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12121/** Stores FPU result in a stack register and sets the FPUDP. */
12122#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12123 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12124/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12125 * stack. */
12126#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12127 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
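/* Illustrative sketch (not from the original source) of how the FPU call and
 * result macros combine for a constant-loading instruction; the worker name
 * and the surrounding locals (FpuRes/pFpuRes) follow the pattern used
 * elsewhere in IEM and are assumptions here:
 *
 *     IEM_MC_PREPARE_FPU_USAGE();
 *     IEM_MC_IF_FPUREG_IS_EMPTY(7)
 *         IEM_MC_CALL_FPU_AIMPL_1(iemAImpl_fld1, pFpuRes);
 *         IEM_MC_PUSH_FPU_RESULT(FpuRes);
 *     IEM_MC_ELSE()
 *         IEM_MC_FPU_STACK_PUSH_OVERFLOW();
 *     IEM_MC_ENDIF();
 *
 * The assembly worker fills in the IEMFPURESULT (FSW and r80Result) and the
 * result macros take care of TOP, the tag word and exception bookkeeping. */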
12128
12129/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12130#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12131 iemFpuUpdateOpcodeAndIp(pVCpu)
12132/** Free a stack register (for FFREE and FFREEP). */
12133#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12134 iemFpuStackFree(pVCpu, a_iStReg)
12135/** Increment the FPU stack pointer. */
12136#define IEM_MC_FPU_STACK_INC_TOP() \
12137 iemFpuStackIncTop(pVCpu)
12138/** Decrement the FPU stack pointer. */
12139#define IEM_MC_FPU_STACK_DEC_TOP() \
12140 iemFpuStackDecTop(pVCpu)
12141
12142/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12143#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12144 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12145/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12146#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12147 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12148/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12149#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12150 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12151/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12152#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12153 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12154/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12155 * stack. */
12156#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12157 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12158/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12159#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12160 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12161
12162/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12163#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12164 iemFpuStackUnderflow(pVCpu, a_iStDst)
12165/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12166 * stack. */
12167#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12168 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12169/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12170 * FPUDS. */
12171#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12172 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12173/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12174 * FPUDS. Pops stack. */
12175#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12176 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12177/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12178 * stack twice. */
12179#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12180 iemFpuStackUnderflowThenPopPop(pVCpu)
12181/** Raises a FPU stack underflow exception for an instruction pushing a result
12182 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12183#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12184 iemFpuStackPushUnderflow(pVCpu)
12185/** Raises a FPU stack underflow exception for an instruction pushing a result
12186 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12187#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12188 iemFpuStackPushUnderflowTwo(pVCpu)
12189
12190/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12191 * FPUIP, FPUCS and FOP. */
12192#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12193 iemFpuStackPushOverflow(pVCpu)
12194/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12195 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12196#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12197 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12198/** Prepares for using the FPU state.
12199 * Ensures that we can use the host FPU in the current context (RC+R0).
12200 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12201#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12202/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12203#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12204/** Actualizes the guest FPU state so it can be accessed and modified. */
12205#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12206
12207/** Prepares for using the SSE state.
12208 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12209 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12210#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12211/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12212#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12213/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12214#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12215
12216/** Prepares for using the AVX state.
12217 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12218 * Ensures the guest AVX state in the CPUMCTX is up to date.
12219 * @note This will include the AVX512 state too when support for it is added
12220 * due to the zero extending feature of VEX instructions. */
12221#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12222/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12223#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12224/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12225#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12226
12227/**
12228 * Calls a MMX assembly implementation taking two visible arguments.
12229 *
12230 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12231 * @param a0 The first extra argument.
12232 * @param a1 The second extra argument.
12233 */
12234#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12235 do { \
12236 IEM_MC_PREPARE_FPU_USAGE(); \
12237 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12238 } while (0)
12239
12240/**
12241 * Calls a MMX assembly implementation taking three visible arguments.
12242 *
12243 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12244 * @param a0 The first extra argument.
12245 * @param a1 The second extra argument.
12246 * @param a2 The third extra argument.
12247 */
12248#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12249 do { \
12250 IEM_MC_PREPARE_FPU_USAGE(); \
12251 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12252 } while (0)
12253
12254
12255/**
12256 * Calls a SSE assembly implementation taking two visible arguments.
12257 *
12258 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12259 * @param a0 The first extra argument.
12260 * @param a1 The second extra argument.
12261 */
12262#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12263 do { \
12264 IEM_MC_PREPARE_SSE_USAGE(); \
12265 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12266 } while (0)
12267
12268/**
12269 * Calls a SSE assembly implementation taking three visible arguments.
12270 *
12271 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12272 * @param a0 The first extra argument.
12273 * @param a1 The second extra argument.
12274 * @param a2 The third extra argument.
12275 */
12276#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12277 do { \
12278 IEM_MC_PREPARE_SSE_USAGE(); \
12279 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12280 } while (0)
12281
12282
12283/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12284 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12285#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12286 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, &pVCpu->cpum.GstCtx.XState, 0)
12287
12288/**
12289 * Calls a AVX assembly implementation taking two visible arguments.
12290 *
12291 * There is one implicit zero'th argument, a pointer to the extended state.
12292 *
12293 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12294 * @param a1 The first extra argument.
12295 * @param a2 The second extra argument.
12296 */
12297#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12298 do { \
12299 IEM_MC_PREPARE_AVX_USAGE(); \
12300 a_pfnAImpl(pXState, (a1), (a2)); \
12301 } while (0)
12302
12303/**
12304 * Calls a AVX assembly implementation taking three visible arguments.
12305 *
12306 * There is one implicit zero'th argument, a pointer to the extended state.
12307 *
12308 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12309 * @param a1 The first extra argument.
12310 * @param a2 The second extra argument.
12311 * @param a3 The third extra argument.
12312 */
12313#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12314 do { \
12315 IEM_MC_PREPARE_AVX_USAGE(); \
12316 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12317 } while (0)
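/* Illustrative pairing (hypothetical worker name, not from the original
 * source): IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() declares the implicit pXState as
 * argument slot 0, so the visible operands are declared as slots 1 and 2:
 *
 *     IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *     IEM_MC_ARG(PRTUINT256U,  puDst, 1);
 *     IEM_MC_ARG(PCRTUINT256U, puSrc, 2);
 *     IEM_MC_CALL_AVX_AIMPL_2(iemAImpl_exampleWorkerU256, puDst, puSrc);
 */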
12318
12319/** @note Not for IOPL or IF testing. */
12320#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12321/** @note Not for IOPL or IF testing. */
12322#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12323/** @note Not for IOPL or IF testing. */
12324#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12325/** @note Not for IOPL or IF testing. */
12326#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12327/** @note Not for IOPL or IF testing. */
12328#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12329 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12330 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12331/** @note Not for IOPL or IF testing. */
12332#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12333 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12334 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12335/** @note Not for IOPL or IF testing. */
12336#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12337 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12338 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12339 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12340/** @note Not for IOPL or IF testing. */
12341#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12342 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12343 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12344 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12345#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12346#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12347#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12348/** @note Not for IOPL or IF testing. */
12349#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12350 if ( pVCpu->cpum.GstCtx.cx != 0 \
12351 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12352/** @note Not for IOPL or IF testing. */
12353#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12354 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12355 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12356/** @note Not for IOPL or IF testing. */
12357#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12358 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12359 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12360/** @note Not for IOPL or IF testing. */
12361#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12362 if ( pVCpu->cpum.GstCtx.cx != 0 \
12363 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12364/** @note Not for IOPL or IF testing. */
12365#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12366 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12367 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12368/** @note Not for IOPL or IF testing. */
12369#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12370 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12371 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12372#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12373#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12374
12375#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12376 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12377#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12378 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12379#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12380 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12381#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12382 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12383#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12384 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12385#define IEM_MC_IF_FCW_IM() \
12386 if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
12387
12388#define IEM_MC_ELSE() } else {
12389#define IEM_MC_ENDIF() } do {} while (0)
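/* Illustrative sketch (not from the original source): these open-brace macros
 * nest like ordinary C blocks, e.g. a Jcc-style body reads
 *
 *     IEM_MC_IF_EFL_BIT_SET(X86_EFL_ZF)
 *         IEM_MC_REL_JMP_S8(i8Imm);
 *     IEM_MC_ELSE()
 *         IEM_MC_ADVANCE_RIP();
 *     IEM_MC_ENDIF();
 *
 * where the RIP-updating macros are the usual IEM_MC ones (their exact names
 * here are an assumption). */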
12390
12391/** @} */
12392
12393
12394/** @name Opcode Debug Helpers.
12395 * @{
12396 */
12397#ifdef VBOX_WITH_STATISTICS
12398# ifdef IN_RING3
12399# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsR3.a_Stats += 1; } while (0)
12400# else
12401# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsRZ.a_Stats += 1; } while (0)
12402# endif
12403#else
12404# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12405#endif
12406
12407#ifdef DEBUG
12408# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12409 do { \
12410 IEMOP_INC_STATS(a_Stats); \
12411 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12412 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12413 } while (0)
12414
12415# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12416 do { \
12417 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12418 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12419 (void)RT_CONCAT(OP_,a_Upper); \
12420 (void)(a_fDisHints); \
12421 (void)(a_fIemHints); \
12422 } while (0)
12423
12424# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12425 do { \
12426 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12427 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12428 (void)RT_CONCAT(OP_,a_Upper); \
12429 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12430 (void)(a_fDisHints); \
12431 (void)(a_fIemHints); \
12432 } while (0)
12433
12434# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12435 do { \
12436 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12437 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12438 (void)RT_CONCAT(OP_,a_Upper); \
12439 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12440 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12441 (void)(a_fDisHints); \
12442 (void)(a_fIemHints); \
12443 } while (0)
12444
12445# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12446 do { \
12447 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12448 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12449 (void)RT_CONCAT(OP_,a_Upper); \
12450 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12451 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12452 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12453 (void)(a_fDisHints); \
12454 (void)(a_fIemHints); \
12455 } while (0)
12456
12457# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12458 do { \
12459 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12460 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12461 (void)RT_CONCAT(OP_,a_Upper); \
12462 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12463 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12464 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12465 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12466 (void)(a_fDisHints); \
12467 (void)(a_fIemHints); \
12468 } while (0)
12469
12470#else
12471# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12472
12473# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12474 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12475# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12476 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12477# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12478 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12479# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12480 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12481# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12482 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12483
12484#endif
12485
12486#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12487 IEMOP_MNEMONIC0EX(a_Lower, \
12488 #a_Lower, \
12489 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12490#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12491 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12492 #a_Lower " " #a_Op1, \
12493 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12494#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12495 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12496 #a_Lower " " #a_Op1 "," #a_Op2, \
12497 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12498#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12499 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12500 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12501 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12502#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12503 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12504 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12505 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
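/* Illustrative expansion (example values, not from the original source): an
 * invocation such as
 *
 *     IEMOP_MNEMONIC2(RM, MOVZX, movzx, Gv, Eb, DISOPTYPE_HARMLESS, 0);
 *
 * bumps the statistics member movzx_Gv_Eb and, in debug builds, logs the
 * "movzx Gv,Eb" mnemonic at Log4 level via IEMOP_MNEMONIC; the (void)
 * references to IEMOPFORM_RM, OP_MOVZX and OP_PARM_Gv/OP_PARM_Eb exist only
 * so that typos in these names fail to compile. */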
12506
12507/** @} */
12508
12509
12510/** @name Opcode Helpers.
12511 * @{
12512 */
12513
12514#ifdef IN_RING3
12515# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12516 do { \
12517 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12518 else \
12519 { \
12520 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12521 return IEMOP_RAISE_INVALID_OPCODE(); \
12522 } \
12523 } while (0)
12524#else
12525# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12526 do { \
12527 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12528 else return IEMOP_RAISE_INVALID_OPCODE(); \
12529 } while (0)
12530#endif
12531
12532/** The instruction requires a 186 or later. */
12533#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12534# define IEMOP_HLP_MIN_186() do { } while (0)
12535#else
12536# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12537#endif
12538
12539/** The instruction requires a 286 or later. */
12540#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12541# define IEMOP_HLP_MIN_286() do { } while (0)
12542#else
12543# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12544#endif
12545
12546/** The instruction requires a 386 or later. */
12547#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12548# define IEMOP_HLP_MIN_386() do { } while (0)
12549#else
12550# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12551#endif
12552
12553/** The instruction requires a 386 or later if the given expression is true. */
12554#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12555# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12556#else
12557# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12558#endif
12559
12560/** The instruction requires a 486 or later. */
12561#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12562# define IEMOP_HLP_MIN_486() do { } while (0)
12563#else
12564# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12565#endif
12566
12567/** The instruction requires a Pentium (586) or later. */
12568#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12569# define IEMOP_HLP_MIN_586() do { } while (0)
12570#else
12571# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12572#endif
12573
12574/** The instruction requires a PentiumPro (686) or later. */
12575#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12576# define IEMOP_HLP_MIN_686() do { } while (0)
12577#else
12578# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12579#endif
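/* Illustrative usage (hypothetical opcode, not from the original source): a
 * decoder for a 186+ instruction starts out roughly as
 *
 *     FNIEMOP_DEF(iemOp_example186)
 *     {
 *         IEMOP_MNEMONIC(example186, "example186");
 *         IEMOP_HLP_MIN_186();
 *         ...
 *     }
 *
 * so that on a configured target CPU older than the 186 the opcode raises #UD
 * (and, in ring-3 debug runs, hits the DBGFSTOP in IEMOP_HLP_MIN_CPU) instead
 * of being interpreted. */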
12580
12581
12582/** The instruction raises an \#UD in real and V8086 mode. */
12583#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12584 do \
12585 { \
12586 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12587 else return IEMOP_RAISE_INVALID_OPCODE(); \
12588 } while (0)
12589
12590#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12591/** The instruction raises an \#UD in real and V8086 mode, or in long mode when
12592 * not using a 64-bit code segment (applicable to all VMX instructions except
12593 * VMCALL).
12594 */
12595#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12596 do \
12597 { \
12598 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12599 && ( !IEM_IS_LONG_MODE(pVCpu) \
12600 || IEM_IS_64BIT_CODE(pVCpu))) \
12601 { /* likely */ } \
12602 else \
12603 { \
12604 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12605 { \
12606 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12607 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12608 return IEMOP_RAISE_INVALID_OPCODE(); \
12609 } \
12610 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12611 { \
12612 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12613 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12614 return IEMOP_RAISE_INVALID_OPCODE(); \
12615 } \
12616 } \
12617 } while (0)
12618
12619/** The instruction can only be executed in VMX operation (VMX root mode and
12620 * non-root mode).
12621 *
12622 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12623 */
12624# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12625 do \
12626 { \
12627 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12628 else \
12629 { \
12630 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12631 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12632 return IEMOP_RAISE_INVALID_OPCODE(); \
12633 } \
12634 } while (0)
12635#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12636
12637/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12638 * 64-bit mode. */
12639#define IEMOP_HLP_NO_64BIT() \
12640 do \
12641 { \
12642 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12643 return IEMOP_RAISE_INVALID_OPCODE(); \
12644 } while (0)
12645
12646/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12647 * 64-bit mode. */
12648#define IEMOP_HLP_ONLY_64BIT() \
12649 do \
12650 { \
12651 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12652 return IEMOP_RAISE_INVALID_OPCODE(); \
12653 } while (0)
12654
12655/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12656#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12657 do \
12658 { \
12659 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12660 iemRecalEffOpSize64Default(pVCpu); \
12661 } while (0)
12662
12663/** The instruction has 64-bit operand size if 64-bit mode. */
12664#define IEMOP_HLP_64BIT_OP_SIZE() \
12665 do \
12666 { \
12667 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12668 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12669 } while (0)
12670
12671/** Only a REX prefix immediately preceding the first opcode byte takes
12672 * effect. This macro helps ensure this as well as logging bad guest code. */
12673#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12674 do \
12675 { \
12676 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12677 { \
12678 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12679 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12680 pVCpu->iem.s.uRexB = 0; \
12681 pVCpu->iem.s.uRexIndex = 0; \
12682 pVCpu->iem.s.uRexReg = 0; \
12683 iemRecalEffOpSize(pVCpu); \
12684 } \
12685 } while (0)
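/* Illustrative note (not from the original source): on real hardware a REX
 * prefix only takes effect when it immediately precedes the opcode, so when
 * another prefix byte turns up after a REX has already been recorded the
 * decoder invokes this macro to drop the stale REX bits (and log the oddball
 * guest code) before continuing. */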
12686
12687/**
12688 * Done decoding.
12689 */
12690#define IEMOP_HLP_DONE_DECODING() \
12691 do \
12692 { \
12693 /*nothing for now, maybe later... */ \
12694 } while (0)
12695
12696/**
12697 * Done decoding, raise \#UD exception if lock prefix present.
12698 */
12699#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12700 do \
12701 { \
12702 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12703 { /* likely */ } \
12704 else \
12705 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12706 } while (0)
12707
12708
12709/**
12710 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12711 * repnz or size prefixes are present, or if in real or v8086 mode.
12712 */
12713#define IEMOP_HLP_DONE_VEX_DECODING() \
12714 do \
12715 { \
12716 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12717 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12718 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12719 { /* likely */ } \
12720 else \
12721 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12722 } while (0)
12723
12724/**
12725 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12726 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12727 */
12728#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12729 do \
12730 { \
12731 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12732 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12733 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12734 && pVCpu->iem.s.uVexLength == 0)) \
12735 { /* likely */ } \
12736 else \
12737 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12738 } while (0)
12739
12740
12741/**
12742 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12743 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12744 * register 0, or if in real or v8086 mode.
12745 */
12746#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12747 do \
12748 { \
12749 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12750 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12751 && !pVCpu->iem.s.uVex3rdReg \
12752 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12753 { /* likely */ } \
12754 else \
12755 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12756 } while (0)
12757
12758/**
12759 * Done decoding VEX, no V, L=0.
12760 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12761 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12762 */
12763#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12764 do \
12765 { \
12766 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12767 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12768 && pVCpu->iem.s.uVexLength == 0 \
12769 && pVCpu->iem.s.uVex3rdReg == 0 \
12770 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12771 { /* likely */ } \
12772 else \
12773 return IEMOP_RAISE_INVALID_OPCODE(); \
12774 } while (0)
12775
12776#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12777 do \
12778 { \
12779 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12780 { /* likely */ } \
12781 else \
12782 { \
12783 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12784 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12785 } \
12786 } while (0)
12787#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12788 do \
12789 { \
12790 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12791 { /* likely */ } \
12792 else \
12793 { \
12794 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12795 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12796 } \
12797 } while (0)
12798
12799/**
12800 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12801 * are present.
12802 */
12803#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12804 do \
12805 { \
12806 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12807 { /* likely */ } \
12808 else \
12809 return IEMOP_RAISE_INVALID_OPCODE(); \
12810 } while (0)
12811
12812/**
12813 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12814 * prefixes are present.
12815 */
12816#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12817 do \
12818 { \
12819 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12820 { /* likely */ } \
12821 else \
12822 return IEMOP_RAISE_INVALID_OPCODE(); \
12823 } while (0)
12824
12825
12826/**
12827 * Calculates the effective address of a ModR/M memory operand.
12828 *
12829 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12830 *
12831 * @return Strict VBox status code.
12832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12833 * @param bRm The ModRM byte.
12834 * @param cbImm The size of any immediate following the
12835 * effective address opcode bytes. Important for
12836 * RIP relative addressing.
12837 * @param pGCPtrEff Where to return the effective address.
12838 */
12839IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12840{
12841 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12842# define SET_SS_DEF() \
12843 do \
12844 { \
12845 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12846 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12847 } while (0)
12848
12849 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12850 {
12851/** @todo Check the effective address size crap! */
12852 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12853 {
12854 uint16_t u16EffAddr;
12855
12856 /* Handle the disp16 form with no registers first. */
12857 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12858 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12859 else
12860 {
12861 /* Get the displacement. */
12862 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12863 {
12864 case 0: u16EffAddr = 0; break;
12865 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12866 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12867 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12868 }
12869
12870 /* Add the base and index registers to the disp. */
12871 switch (bRm & X86_MODRM_RM_MASK)
12872 {
12873 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12874 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12875 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12876 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12877 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12878 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12879 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12880 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12881 }
12882 }
12883
12884 *pGCPtrEff = u16EffAddr;
12885 }
12886 else
12887 {
12888 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12889 uint32_t u32EffAddr;
12890
12891 /* Handle the disp32 form with no registers first. */
12892 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12893 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12894 else
12895 {
12896 /* Get the register (or SIB) value. */
12897 switch ((bRm & X86_MODRM_RM_MASK))
12898 {
12899 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12900 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12901 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12902 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12903 case 4: /* SIB */
12904 {
12905 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12906
12907 /* Get the index and scale it. */
12908 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12909 {
12910 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12911 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12912 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12913 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12914 case 4: u32EffAddr = 0; /*none */ break;
12915 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12916 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12917 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12918 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12919 }
12920 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12921
12922 /* add base */
12923 switch (bSib & X86_SIB_BASE_MASK)
12924 {
12925 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12926 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12927 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12928 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12929 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12930 case 5:
12931 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12932 {
12933 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12934 SET_SS_DEF();
12935 }
12936 else
12937 {
12938 uint32_t u32Disp;
12939 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12940 u32EffAddr += u32Disp;
12941 }
12942 break;
12943 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12944 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12945 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12946 }
12947 break;
12948 }
12949 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12950 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12951 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12952 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12953 }
12954
12955 /* Get and add the displacement. */
12956 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12957 {
12958 case 0:
12959 break;
12960 case 1:
12961 {
12962 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12963 u32EffAddr += i8Disp;
12964 break;
12965 }
12966 case 2:
12967 {
12968 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12969 u32EffAddr += u32Disp;
12970 break;
12971 }
12972 default:
12973 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12974 }
12975
12976 }
12977 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12978 *pGCPtrEff = u32EffAddr;
12979 else
12980 {
12981 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
12982 *pGCPtrEff = u32EffAddr & UINT16_MAX;
12983 }
12984 }
12985 }
12986 else
12987 {
12988 uint64_t u64EffAddr;
12989
12990 /* Handle the rip+disp32 form with no registers first. */
12991 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12992 {
12993 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
12994 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
12995 }
12996 else
12997 {
12998 /* Get the register (or SIB) value. */
12999 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13000 {
13001 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13002 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13003 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13004 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13005 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13006 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13007 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13008 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13009 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13010 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13011 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13012 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13013 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13014 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13015 /* SIB */
13016 case 4:
13017 case 12:
13018 {
13019 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13020
13021 /* Get the index and scale it. */
13022 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13023 {
13024 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13025 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13026 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13027 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13028 case 4: u64EffAddr = 0; /*none */ break;
13029 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13030 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13031 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13032 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13033 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13034 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13035 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13036 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13037 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13038 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13039 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13040 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13041 }
13042 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13043
13044 /* add base */
13045 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13046 {
13047 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13048 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13049 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13050 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13051 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13052 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13053 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13054 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13055 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13056 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13057 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13058 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13059 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13060 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13061 /* complicated encodings */
13062 case 5:
13063 case 13:
13064 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13065 {
13066 if (!pVCpu->iem.s.uRexB)
13067 {
13068 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13069 SET_SS_DEF();
13070 }
13071 else
13072 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13073 }
13074 else
13075 {
13076 uint32_t u32Disp;
13077 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13078 u64EffAddr += (int32_t)u32Disp;
13079 }
13080 break;
13081 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13082 }
13083 break;
13084 }
13085 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13086 }
13087
13088 /* Get and add the displacement. */
13089 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13090 {
13091 case 0:
13092 break;
13093 case 1:
13094 {
13095 int8_t i8Disp;
13096 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13097 u64EffAddr += i8Disp;
13098 break;
13099 }
13100 case 2:
13101 {
13102 uint32_t u32Disp;
13103 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13104 u64EffAddr += (int32_t)u32Disp;
13105 break;
13106 }
13107 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13108 }
13109
13110 }
13111
13112 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13113 *pGCPtrEff = u64EffAddr;
13114 else
13115 {
13116 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13117 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13118 }
13119 }
13120
13121 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13122 return VINF_SUCCESS;
13123}
13124
13125
13126/**
13127 * Calculates the effective address of a ModR/M memory operand.
13128 *
13129 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13130 *
13131 * @return Strict VBox status code.
13132 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13133 * @param bRm The ModRM byte.
13134 * @param cbImm The size of any immediate following the
13135 * effective address opcode bytes. Important for
13136 * RIP relative addressing.
13137 * @param pGCPtrEff Where to return the effective address.
13138 * @param offRsp RSP displacement.
13139 */
13140IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13141{
13142    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13143# define SET_SS_DEF() \
13144 do \
13145 { \
13146 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13147 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13148 } while (0)
13149
13150 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13151 {
13152/** @todo Check the effective address size crap! */
13153 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13154 {
13155 uint16_t u16EffAddr;
13156
13157 /* Handle the disp16 form with no registers first. */
13158 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13159 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13160 else
13161 {
13162                /* Get the displacement. */
13163 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13164 {
13165 case 0: u16EffAddr = 0; break;
13166 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13167 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13168 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13169 }
13170
13171 /* Add the base and index registers to the disp. */
13172 switch (bRm & X86_MODRM_RM_MASK)
13173 {
13174 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13175 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13176 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13177 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13178 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13179 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13180 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13181 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13182 }
13183 }
13184
13185 *pGCPtrEff = u16EffAddr;
13186 }
13187 else
13188 {
13189 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13190 uint32_t u32EffAddr;
13191
13192 /* Handle the disp32 form with no registers first. */
13193 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13194 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13195 else
13196 {
13197 /* Get the register (or SIB) value. */
13198 switch ((bRm & X86_MODRM_RM_MASK))
13199 {
13200 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13201 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13202 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13203 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13204 case 4: /* SIB */
13205 {
13206 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13207
13208 /* Get the index and scale it. */
13209 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13210 {
13211 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13212 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13213 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13214 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13215 case 4: u32EffAddr = 0; /*none */ break;
13216 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13217 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13218 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13219 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13220 }
13221 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13222
13223 /* add base */
13224 switch (bSib & X86_SIB_BASE_MASK)
13225 {
13226 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13227 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13228 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13229 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13230 case 4:
13231 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13232 SET_SS_DEF();
13233 break;
13234 case 5:
13235 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13236 {
13237 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13238 SET_SS_DEF();
13239 }
13240 else
13241 {
13242 uint32_t u32Disp;
13243 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13244 u32EffAddr += u32Disp;
13245 }
13246 break;
13247 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13248 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13249 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13250 }
13251 break;
13252 }
13253 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13254 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13255 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13256 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13257 }
13258
13259 /* Get and add the displacement. */
13260 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13261 {
13262 case 0:
13263 break;
13264 case 1:
13265 {
13266 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13267 u32EffAddr += i8Disp;
13268 break;
13269 }
13270 case 2:
13271 {
13272 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13273 u32EffAddr += u32Disp;
13274 break;
13275 }
13276 default:
13277 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13278 }
13279
13280 }
13281 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13282 *pGCPtrEff = u32EffAddr;
13283 else
13284 {
13285 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13286 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13287 }
13288 }
13289 }
13290 else
13291 {
13292 uint64_t u64EffAddr;
13293
13294 /* Handle the rip+disp32 form with no registers first. */
13295 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13296 {
13297 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13298 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13299 }
13300 else
13301 {
13302 /* Get the register (or SIB) value. */
13303 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13304 {
13305 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13306 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13307 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13308 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13309 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13310 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13311 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13312 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13313 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13314 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13315 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13316 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13317 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13318 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13319 /* SIB */
13320 case 4:
13321 case 12:
13322 {
13323 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13324
13325 /* Get the index and scale it. */
13326 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13327 {
13328 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13329 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13330 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13331 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13332 case 4: u64EffAddr = 0; /*none */ break;
13333 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13334 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13335 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13336 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13337 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13338 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13339 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13340 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13341 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13342 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13343 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13344 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13345 }
13346 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13347
13348 /* add base */
13349 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13350 {
13351 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13352 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13353 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13354 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13355 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13356 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13357 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13358 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13359 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13360 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13361 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13362 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13363 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13364 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13365 /* complicated encodings */
13366 case 5:
13367 case 13:
13368 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13369 {
13370 if (!pVCpu->iem.s.uRexB)
13371 {
13372 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13373 SET_SS_DEF();
13374 }
13375 else
13376 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13377 }
13378 else
13379 {
13380 uint32_t u32Disp;
13381 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13382 u64EffAddr += (int32_t)u32Disp;
13383 }
13384 break;
13385 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13386 }
13387 break;
13388 }
13389 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13390 }
13391
13392 /* Get and add the displacement. */
13393 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13394 {
13395 case 0:
13396 break;
13397 case 1:
13398 {
13399 int8_t i8Disp;
13400 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13401 u64EffAddr += i8Disp;
13402 break;
13403 }
13404 case 2:
13405 {
13406 uint32_t u32Disp;
13407 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13408 u64EffAddr += (int32_t)u32Disp;
13409 break;
13410 }
13411 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13412 }
13413
13414 }
13415
13416 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13417 *pGCPtrEff = u64EffAddr;
13418 else
13419 {
13420 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13421 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13422 }
13423 }
13424
13425    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13426 return VINF_SUCCESS;
13427}
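/*
 * Worked example (illustrative only, not used by the code): for the 64-bit
 * instruction 'mov rax, [rbx+rcx*4+0x10]' (bytes 48 8B 44 8B 10) the decoder
 * hands the helper above bRm=0x44 (mod=1, rm=4 => a SIB byte follows), and the
 * helper fetches bSib=0x8B (scale=2, index=rcx, base=rbx) plus the disp8 0x10:
 *
 *      u64EffAddr  = pVCpu->cpum.GstCtx.rcx << 2;   // scaled index
 *      u64EffAddr += pVCpu->cpum.GstCtx.rbx;        // base register
 *      u64EffAddr += 0x10;                          // sign-extended disp8
 *
 * With rbx=0x1000 and rcx=3 this yields *pGCPtrEff = 0x101c.
 */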
13428
13429
13430#ifdef IEM_WITH_SETJMP
13431/**
13432 * Calculates the effective address of a ModR/M memory operand.
13433 *
13434 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13435 *
13436 * May longjmp on internal error.
13437 *
13438 * @return The effective address.
13439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13440 * @param bRm The ModRM byte.
13441 * @param cbImm The size of any immediate following the
13442 * effective address opcode bytes. Important for
13443 * RIP relative addressing.
13444 */
13445IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm)
13446{
13447 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13448# define SET_SS_DEF() \
13449 do \
13450 { \
13451 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13452 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13453 } while (0)
13454
13455 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13456 {
13457/** @todo Check the effective address size crap! */
13458 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13459 {
13460 uint16_t u16EffAddr;
13461
13462 /* Handle the disp16 form with no registers first. */
13463 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13464 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13465 else
13466 {
13467                /* Get the displacement. */
13468 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13469 {
13470 case 0: u16EffAddr = 0; break;
13471 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13472 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13473 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13474 }
13475
13476 /* Add the base and index registers to the disp. */
13477 switch (bRm & X86_MODRM_RM_MASK)
13478 {
13479 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13480 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13481 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13482 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13483 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13484 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13485 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13486 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13487 }
13488 }
13489
13490 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13491 return u16EffAddr;
13492 }
13493
13494 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13495 uint32_t u32EffAddr;
13496
13497 /* Handle the disp32 form with no registers first. */
13498 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13499 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13500 else
13501 {
13502 /* Get the register (or SIB) value. */
13503 switch ((bRm & X86_MODRM_RM_MASK))
13504 {
13505 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13506 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13507 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13508 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13509 case 4: /* SIB */
13510 {
13511 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13512
13513 /* Get the index and scale it. */
13514 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13515 {
13516 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13517 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13518 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13519 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13520 case 4: u32EffAddr = 0; /*none */ break;
13521 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13522 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13523 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13524 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13525 }
13526 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13527
13528 /* add base */
13529 switch (bSib & X86_SIB_BASE_MASK)
13530 {
13531 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13532 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13533 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13534 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13535 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13536 case 5:
13537 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13538 {
13539 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13540 SET_SS_DEF();
13541 }
13542 else
13543 {
13544 uint32_t u32Disp;
13545 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13546 u32EffAddr += u32Disp;
13547 }
13548 break;
13549 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13550 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13551 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13552 }
13553 break;
13554 }
13555 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13556 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13557 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13558 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13559 }
13560
13561 /* Get and add the displacement. */
13562 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13563 {
13564 case 0:
13565 break;
13566 case 1:
13567 {
13568 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13569 u32EffAddr += i8Disp;
13570 break;
13571 }
13572 case 2:
13573 {
13574 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13575 u32EffAddr += u32Disp;
13576 break;
13577 }
13578 default:
13579 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13580 }
13581 }
13582
13583 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13584 {
13585 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13586 return u32EffAddr;
13587 }
13588 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13589 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13590 return u32EffAddr & UINT16_MAX;
13591 }
13592
13593 uint64_t u64EffAddr;
13594
13595 /* Handle the rip+disp32 form with no registers first. */
13596 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13597 {
13598 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13599 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13600 }
13601 else
13602 {
13603 /* Get the register (or SIB) value. */
13604 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13605 {
13606 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13607 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13608 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13609 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13610 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13611 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13612 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13613 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13614 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13615 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13616 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13617 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13618 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13619 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13620 /* SIB */
13621 case 4:
13622 case 12:
13623 {
13624 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13625
13626 /* Get the index and scale it. */
13627 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13628 {
13629 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13630 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13631 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13632 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13633 case 4: u64EffAddr = 0; /*none */ break;
13634 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13635 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13636 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13637 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13638 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13639 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13640 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13641 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13642 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13643 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13644 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13645 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13646 }
13647 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13648
13649 /* add base */
13650 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13651 {
13652 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13653 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13654 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13655 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13656 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13657 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13658 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13659 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13660 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13661 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13662 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13663 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13664 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13665 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13666 /* complicated encodings */
13667 case 5:
13668 case 13:
13669 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13670 {
13671 if (!pVCpu->iem.s.uRexB)
13672 {
13673 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13674 SET_SS_DEF();
13675 }
13676 else
13677 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13678 }
13679 else
13680 {
13681 uint32_t u32Disp;
13682 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13683 u64EffAddr += (int32_t)u32Disp;
13684 }
13685 break;
13686 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13687 }
13688 break;
13689 }
13690 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13691 }
13692
13693 /* Get and add the displacement. */
13694 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13695 {
13696 case 0:
13697 break;
13698 case 1:
13699 {
13700 int8_t i8Disp;
13701 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13702 u64EffAddr += i8Disp;
13703 break;
13704 }
13705 case 2:
13706 {
13707 uint32_t u32Disp;
13708 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13709 u64EffAddr += (int32_t)u32Disp;
13710 break;
13711 }
13712 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13713 }
13714
13715 }
13716
13717 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13718 {
13719 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13720 return u64EffAddr;
13721 }
13722 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13723 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13724 return u64EffAddr & UINT32_MAX;
13725}
13726#endif /* IEM_WITH_SETJMP */
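/*
 * Note on the rip+disp32 form (illustrative sketch): the displacement is
 * relative to the address of the *next* instruction, but when the helpers
 * above parse the ModR/M and displacement bytes any trailing immediate has
 * not been fetched yet, which is presumably why the caller passes its size
 * in cbImm.  E.g. for 'cmp dword [rip+0x100], 0x42' (bytes 81 3D 00 01 00 00
 * 42 00 00 00, 10 bytes in total) only the first 6 bytes have been consumed
 * at this point, so the effective address works out as:
 *
 *      GCPtrEff = pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + 4 + 0x100;
 *
 * i.e. start of the instruction + 6 + cbImm(4) + disp32, which is the address
 * of the following instruction plus the displacement, as the CPU would do it.
 */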
13727
13728/** @} */
13729
13730
13731
13732/*
13733 * Include the instructions
13734 */
13735#include "IEMAllInstructions.cpp.h"
13736
13737
13738
13739#ifdef LOG_ENABLED
13740/**
13741 * Logs the current instruction.
13742 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13743 * @param fSameCtx Set if we have the same context information as the VMM,
13744 * clear if we may have already executed an instruction in
13745 * our debug context. When clear, we assume IEMCPU holds
13746 * valid CPU mode info.
13747 *
13748 * The @a fSameCtx parameter is now misleading and obsolete.
13749 * @param pszFunction The IEM function doing the execution.
13750 */
13751IEM_STATIC void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction)
13752{
13753# ifdef IN_RING3
13754 if (LogIs2Enabled())
13755 {
13756 char szInstr[256];
13757 uint32_t cbInstr = 0;
13758 if (fSameCtx)
13759 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13760 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13761 szInstr, sizeof(szInstr), &cbInstr);
13762 else
13763 {
13764 uint32_t fFlags = 0;
13765 switch (pVCpu->iem.s.enmCpuMode)
13766 {
13767 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13768 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13769 case IEMMODE_16BIT:
13770 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13771 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13772 else
13773 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13774 break;
13775 }
13776 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13777 szInstr, sizeof(szInstr), &cbInstr);
13778 }
13779
13780 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
13781 Log2(("**** %s\n"
13782 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13783 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13784 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13785 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13786 " %s\n"
13787 , pszFunction,
13788 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13789 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13790 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13791 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13792 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13793 szInstr));
13794
13795 if (LogIs3Enabled())
13796 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13797 }
13798 else
13799# endif
13800 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13801 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13802 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13803}
13804#endif /* LOG_ENABLED */
13805
13806
13807#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13808/**
13809 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
13810 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
13811 *
13812 * @returns Modified rcStrict.
13813 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13814 * @param rcStrict The instruction execution status.
13815 */
13816static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13817{
13818 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
13819 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
13820 {
13821 /* VMX preemption timer takes priority over NMI-window exits. */
13822 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
13823 {
13824 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
13825 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
13826 }
13827 /*
13828 * Check remaining intercepts.
13829 *
13830 * NMI-window and Interrupt-window VM-exits.
13831 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
13832 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
13833 *
13834 * See Intel spec. 26.7.6 "NMI-Window Exiting".
13835 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
13836 */
13837 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
13838 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13839 && !TRPMHasTrap(pVCpu))
13840 {
13841 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
13842 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
13843 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
13844 {
13845 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
13846 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
13847 }
13848 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
13849 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
13850 {
13851 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
13852 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
13853 }
13854 }
13855 }
13856 /* TPR-below threshold/APIC write has the highest priority. */
13857 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
13858 {
13859 rcStrict = iemVmxApicWriteEmulation(pVCpu);
13860 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13861 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
13862 }
13863 /* MTF takes priority over VMX-preemption timer. */
13864 else
13865 {
13866 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
13867 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13868 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
13869 }
13870 return rcStrict;
13871}
13872#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
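/*
 * Summary of the priority order implemented above (derived from the code, not
 * quoted from the SDM): a pending APIC-write emulation is serviced first, then
 * a pending MTF VM-exit, then the VMX-preemption timer, and only then the
 * NMI-window and interrupt-window exits - the latter two additionally require
 * that no interrupt shadow is active and that no TRPM event is pending.
 */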
13873
13874
13875/**
13876 * Makes status code adjustments (pass up from I/O and access handlers)
13877 * as well as maintaining statistics.
13878 *
13879 * @returns Strict VBox status code to pass up.
13880 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13881 * @param rcStrict The status from executing an instruction.
13882 */
13883DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13884{
13885 if (rcStrict != VINF_SUCCESS)
13886 {
13887 if (RT_SUCCESS(rcStrict))
13888 {
13889 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13890 || rcStrict == VINF_IOM_R3_IOPORT_READ
13891 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13892 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13893 || rcStrict == VINF_IOM_R3_MMIO_READ
13894 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13895 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13896 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13897 || rcStrict == VINF_CPUM_R3_MSR_READ
13898 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13899 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13900 || rcStrict == VINF_EM_RAW_TO_R3
13901 || rcStrict == VINF_EM_TRIPLE_FAULT
13902 || rcStrict == VINF_GIM_R3_HYPERCALL
13903 /* raw-mode / virt handlers only: */
13904 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13905 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13906 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13907 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13908 || rcStrict == VINF_SELM_SYNC_GDT
13909 || rcStrict == VINF_CSAM_PENDING_ACTION
13910 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13911 /* nested hw.virt codes: */
13912 || rcStrict == VINF_VMX_VMEXIT
13913 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
13914 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13915 || rcStrict == VINF_SVM_VMEXIT
13916 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13917/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13918 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13919#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13920 if ( rcStrict == VINF_VMX_VMEXIT
13921 && rcPassUp == VINF_SUCCESS)
13922 rcStrict = VINF_SUCCESS;
13923 else
13924#endif
13925#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13926 if ( rcStrict == VINF_SVM_VMEXIT
13927 && rcPassUp == VINF_SUCCESS)
13928 rcStrict = VINF_SUCCESS;
13929 else
13930#endif
13931 if (rcPassUp == VINF_SUCCESS)
13932 pVCpu->iem.s.cRetInfStatuses++;
13933 else if ( rcPassUp < VINF_EM_FIRST
13934 || rcPassUp > VINF_EM_LAST
13935 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13936 {
13937 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13938 pVCpu->iem.s.cRetPassUpStatus++;
13939 rcStrict = rcPassUp;
13940 }
13941 else
13942 {
13943 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13944 pVCpu->iem.s.cRetInfStatuses++;
13945 }
13946 }
13947 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13948 pVCpu->iem.s.cRetAspectNotImplemented++;
13949 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13950 pVCpu->iem.s.cRetInstrNotImplemented++;
13951 else
13952 pVCpu->iem.s.cRetErrStatuses++;
13953 }
13954 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13955 {
13956 pVCpu->iem.s.cRetPassUpStatus++;
13957 rcStrict = pVCpu->iem.s.rcPassUp;
13958 }
13959
13960 return rcStrict;
13961}
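/*
 * Example (follows directly from the code above): if the instruction itself
 * completed with VINF_SUCCESS but an earlier memory commit recorded, say,
 * VINF_IOM_R3_IOPORT_WRITE in pVCpu->iem.s.rcPassUp, the final else-if kicks
 * in, cRetPassUpStatus is bumped and VINF_IOM_R3_IOPORT_WRITE is what the
 * caller gets back instead of VINF_SUCCESS.
 */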
13962
13963
13964/**
13965 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13966 * IEMExecOneWithPrefetchedByPC.
13967 *
13968 * Similar code is found in IEMExecLots.
13969 *
13970 * @return Strict VBox status code.
13971 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13972 * @param  fExecuteInhibit  If set, execute the instruction following STI,
13973 * POP SS and MOV SS,GR.
13974 * @param pszFunction The calling function name.
13975 */
13976DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
13977{
13978 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13979 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13980 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
13981 RT_NOREF_PV(pszFunction);
13982
13983#ifdef IEM_WITH_SETJMP
13984 VBOXSTRICTRC rcStrict;
13985 jmp_buf JmpBuf;
13986 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
13987 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
13988 if ((rcStrict = setjmp(JmpBuf)) == 0)
13989 {
13990 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13991 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13992 }
13993 else
13994 pVCpu->iem.s.cLongJumps++;
13995 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
13996#else
13997 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
13998 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
13999#endif
14000 if (rcStrict == VINF_SUCCESS)
14001 pVCpu->iem.s.cInstructions++;
14002 if (pVCpu->iem.s.cActiveMappings > 0)
14003 {
14004 Assert(rcStrict != VINF_SUCCESS);
14005 iemMemRollback(pVCpu);
14006 }
14007 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14008 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14009 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14010
14011//#ifdef DEBUG
14012// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14013//#endif
14014
14015#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14016 /*
14017 * Perform any VMX nested-guest instruction boundary actions.
14018 *
14019 * If any of these causes a VM-exit, we must skip executing the next
14020 * instruction (would run into stale page tables). A VM-exit makes sure
14021     * there is no interrupt-inhibition, so that should ensure we don't
14022     * try to execute the next instruction. Clearing fExecuteInhibit is
14023 * problematic because of the setjmp/longjmp clobbering above.
14024 */
14025 if ( rcStrict == VINF_SUCCESS
14026 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
14027 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
14028 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
14029#endif
14030
14031    /* Execute the next instruction as well if a sti, pop ss or
14032 mov ss, Gr has just completed successfully. */
14033 if ( fExecuteInhibit
14034 && rcStrict == VINF_SUCCESS
14035 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14036 && EMIsInhibitInterruptsActive(pVCpu))
14037 {
14038 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
14039 if (rcStrict == VINF_SUCCESS)
14040 {
14041#ifdef LOG_ENABLED
14042 iemLogCurInstr(pVCpu, false, pszFunction);
14043#endif
14044#ifdef IEM_WITH_SETJMP
14045 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14046 if ((rcStrict = setjmp(JmpBuf)) == 0)
14047 {
14048 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14049 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14050 }
14051 else
14052 pVCpu->iem.s.cLongJumps++;
14053 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14054#else
14055 IEM_OPCODE_GET_NEXT_U8(&b);
14056 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14057#endif
14058 if (rcStrict == VINF_SUCCESS)
14059 pVCpu->iem.s.cInstructions++;
14060 if (pVCpu->iem.s.cActiveMappings > 0)
14061 {
14062 Assert(rcStrict != VINF_SUCCESS);
14063 iemMemRollback(pVCpu);
14064 }
14065 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14066 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14067 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14068 }
14069 else if (pVCpu->iem.s.cActiveMappings > 0)
14070 iemMemRollback(pVCpu);
14071 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
14072 }
14073
14074 /*
14075 * Return value fiddling, statistics and sanity assertions.
14076 */
14077 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14078
14079 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14080 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14081 return rcStrict;
14082}
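/*
 * Example of the fExecuteInhibit handling above (illustrative): for the
 * classic stack switch sequence
 *
 *      mov ss, ax
 *      mov sp, bx
 *
 * the first instruction raises the interrupt shadow, so this function decodes
 * and executes the 'mov sp, bx' in the same call instead of returning in
 * between, mirroring how a real CPU keeps the pair uninterruptible.
 */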
14083
14084
14085/**
14086 * Execute one instruction.
14087 *
14088 * @return Strict VBox status code.
14089 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14090 */
14091VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
14092{
14093    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
14094#ifdef LOG_ENABLED
14095 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14096#endif
14097
14098 /*
14099 * Do the decoding and emulation.
14100 */
14101 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14102 if (rcStrict == VINF_SUCCESS)
14103 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14104 else if (pVCpu->iem.s.cActiveMappings > 0)
14105 iemMemRollback(pVCpu);
14106
14107 if (rcStrict != VINF_SUCCESS)
14108 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14109 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14110 return rcStrict;
14111}
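/*
 * Minimal usage sketch (assumes the caller is the EMT that owns pVCpu and
 * that the relevant guest state is available to IEM):
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;    // let EM deal with informational statuses
 */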
14112
14113
14114VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14115{
14116 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14117
14118 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14119 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14120 if (rcStrict == VINF_SUCCESS)
14121 {
14122 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14123 if (pcbWritten)
14124 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14125 }
14126 else if (pVCpu->iem.s.cActiveMappings > 0)
14127 iemMemRollback(pVCpu);
14128
14129 return rcStrict;
14130}
14131
14132
14133VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14134 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14135{
14136 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14137
14138 VBOXSTRICTRC rcStrict;
14139 if ( cbOpcodeBytes
14140 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14141 {
14142 iemInitDecoder(pVCpu, false, false);
14143#ifdef IEM_WITH_CODE_TLB
14144 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14145 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14146 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14147 pVCpu->iem.s.offCurInstrStart = 0;
14148 pVCpu->iem.s.offInstrNextByte = 0;
14149#else
14150 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14151 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14152#endif
14153 rcStrict = VINF_SUCCESS;
14154 }
14155 else
14156 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14157 if (rcStrict == VINF_SUCCESS)
14158 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14159 else if (pVCpu->iem.s.cActiveMappings > 0)
14160 iemMemRollback(pVCpu);
14161
14162 return rcStrict;
14163}
14164
14165
14166VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14167{
14168 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14169
14170 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14171 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14172 if (rcStrict == VINF_SUCCESS)
14173 {
14174 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14175 if (pcbWritten)
14176 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14177 }
14178 else if (pVCpu->iem.s.cActiveMappings > 0)
14179 iemMemRollback(pVCpu);
14180
14181 return rcStrict;
14182}
14183
14184
14185VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14186 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14187{
14188 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14189
14190 VBOXSTRICTRC rcStrict;
14191 if ( cbOpcodeBytes
14192 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14193 {
14194 iemInitDecoder(pVCpu, true, false);
14195#ifdef IEM_WITH_CODE_TLB
14196 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14197 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14198 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14199 pVCpu->iem.s.offCurInstrStart = 0;
14200 pVCpu->iem.s.offInstrNextByte = 0;
14201#else
14202 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14203 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14204#endif
14205 rcStrict = VINF_SUCCESS;
14206 }
14207 else
14208 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14209 if (rcStrict == VINF_SUCCESS)
14210 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14211 else if (pVCpu->iem.s.cActiveMappings > 0)
14212 iemMemRollback(pVCpu);
14213
14214 return rcStrict;
14215}
14216
14217
14218/**
14219 * For debugging DISGetParamSize; may come in handy.
14220 *
14221 * @returns Strict VBox status code.
14222 * @param pVCpu The cross context virtual CPU structure of the
14223 * calling EMT.
14224 * @param pCtxCore The context core structure.
14225 * @param OpcodeBytesPC The PC of the opcode bytes.
14226 * @param pvOpcodeBytes   Prefetched opcode bytes.
14227 * @param cbOpcodeBytes Number of prefetched bytes.
14228 * @param pcbWritten Where to return the number of bytes written.
14229 * Optional.
14230 */
14231VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14232 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14233 uint32_t *pcbWritten)
14234{
14235 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14236
14237 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14238 VBOXSTRICTRC rcStrict;
14239 if ( cbOpcodeBytes
14240 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14241 {
14242 iemInitDecoder(pVCpu, true, false);
14243#ifdef IEM_WITH_CODE_TLB
14244 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14245 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14246 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14247 pVCpu->iem.s.offCurInstrStart = 0;
14248 pVCpu->iem.s.offInstrNextByte = 0;
14249#else
14250 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14251 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14252#endif
14253 rcStrict = VINF_SUCCESS;
14254 }
14255 else
14256 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14257 if (rcStrict == VINF_SUCCESS)
14258 {
14259 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14260 if (pcbWritten)
14261 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14262 }
14263 else if (pVCpu->iem.s.cActiveMappings > 0)
14264 iemMemRollback(pVCpu);
14265
14266 return rcStrict;
14267}
14268
14269
14270/**
14271 * For handling split cacheline lock operations when the host has split-lock
14272 * detection enabled.
14273 *
14274 * This will cause the interpreter to disregard the lock prefix and implicit
14275 * locking (xchg).
14276 *
14277 * @returns Strict VBox status code.
14278 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14279 */
14280VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
14281{
14282 /*
14283 * Do the decoding and emulation.
14284 */
14285 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
14286 if (rcStrict == VINF_SUCCESS)
14287 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
14288 else if (pVCpu->iem.s.cActiveMappings > 0)
14289 iemMemRollback(pVCpu);
14290
14291 if (rcStrict != VINF_SUCCESS)
14292 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14293 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14294 return rcStrict;
14295}
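/*
 * Hypothetical caller sketch: when the host has split-lock detection enabled
 * and the guest trips it with a locked access that straddles a cache line,
 * the exit handler can simply re-run the instruction without atomicity:
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
 */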
14296
14297
14298VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14299{
14300 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14301 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14302
14303 /*
14304 * See if there is an interrupt pending in TRPM, inject it if we can.
14305 */
14306 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14307#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14308 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14309 if (fIntrEnabled)
14310 {
14311 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14312 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14313 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14314 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
14315 else
14316 {
14317 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14318 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14319 }
14320 }
14321#else
14322 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14323#endif
14324
14325 /** @todo What if we are injecting an exception and not an interrupt? Is that
14326 * possible here? For now we assert it is indeed only an interrupt. */
14327 if ( fIntrEnabled
14328 && TRPMHasTrap(pVCpu)
14329 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14330 {
14331 uint8_t u8TrapNo;
14332 TRPMEVENT enmType;
14333 uint32_t uErrCode;
14334 RTGCPTR uCr2;
14335 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
14336 AssertRC(rc2);
14337 Assert(enmType == TRPM_HARDWARE_INT);
14338 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14339 TRPMResetTrap(pVCpu);
14340#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14341 /* Injecting an event may cause a VM-exit. */
14342 if ( rcStrict != VINF_SUCCESS
14343 && rcStrict != VINF_IEM_RAISED_XCPT)
14344 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14345#else
14346 NOREF(rcStrict);
14347#endif
14348 }
14349
14350 /*
14351 * Initial decoder init w/ prefetch, then setup setjmp.
14352 */
14353 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14354 if (rcStrict == VINF_SUCCESS)
14355 {
14356#ifdef IEM_WITH_SETJMP
14357 jmp_buf JmpBuf;
14358 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14359 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14360 pVCpu->iem.s.cActiveMappings = 0;
14361 if ((rcStrict = setjmp(JmpBuf)) == 0)
14362#endif
14363 {
14364 /*
14365             * The run loop, limited to the caller specified cMaxInstructions.
14366 */
14367 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14368 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
14369 for (;;)
14370 {
14371 /*
14372 * Log the state.
14373 */
14374#ifdef LOG_ENABLED
14375 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14376#endif
14377
14378 /*
14379 * Do the decoding and emulation.
14380 */
14381 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14382 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14383 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14384 {
14385 Assert(pVCpu->iem.s.cActiveMappings == 0);
14386 pVCpu->iem.s.cInstructions++;
14387 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14388 {
14389 uint64_t fCpu = pVCpu->fLocalForcedActions
14390 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14391 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14392 | VMCPU_FF_TLB_FLUSH
14393 | VMCPU_FF_INHIBIT_INTERRUPTS
14394 | VMCPU_FF_BLOCK_NMIS
14395 | VMCPU_FF_UNHALT ));
14396
14397 if (RT_LIKELY( ( !fCpu
14398 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14399 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14400 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14401 {
14402 if (cMaxInstructionsGccStupidity-- > 0)
14403 {
14404                                /* Poll timers every now and then according to the caller's specs. */
14405 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14406 || !TMTimerPollBool(pVM, pVCpu))
14407 {
14408 Assert(pVCpu->iem.s.cActiveMappings == 0);
14409 iemReInitDecoder(pVCpu);
14410 continue;
14411 }
14412 }
14413 }
14414 }
14415 Assert(pVCpu->iem.s.cActiveMappings == 0);
14416 }
14417 else if (pVCpu->iem.s.cActiveMappings > 0)
14418 iemMemRollback(pVCpu);
14419 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14420 break;
14421 }
14422 }
14423#ifdef IEM_WITH_SETJMP
14424 else
14425 {
14426 if (pVCpu->iem.s.cActiveMappings > 0)
14427 iemMemRollback(pVCpu);
14428# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14429 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14430# endif
14431 pVCpu->iem.s.cLongJumps++;
14432 }
14433 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14434#endif
14435
14436 /*
14437 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14438 */
14439 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14440 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14441 }
14442 else
14443 {
14444 if (pVCpu->iem.s.cActiveMappings > 0)
14445 iemMemRollback(pVCpu);
14446
14447#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14448 /*
14449 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14450 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14451 */
14452 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14453#endif
14454 }
14455
14456 /*
14457     * Log the result if it isn't VINF_SUCCESS.
14458 */
14459 if (rcStrict != VINF_SUCCESS)
14460 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14461 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14462 if (pcInstructions)
14463 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14464 return rcStrict;
14465}
14466
14467
14468/**
14469 * Interface used by EMExecuteExec, does exit statistics and limits.
14470 *
14471 * @returns Strict VBox status code.
14472 * @param pVCpu The cross context virtual CPU structure.
14473 * @param fWillExit To be defined.
14474 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14475 * @param cMaxInstructions Maximum number of instructions to execute.
14476 * @param cMaxInstructionsWithoutExits
14477 * The max number of instructions without exits.
14478 * @param pStats Where to return statistics.
14479 */
14480VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14481 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14482{
14483 NOREF(fWillExit); /** @todo define flexible exit crits */
14484
14485 /*
14486 * Initialize return stats.
14487 */
14488 pStats->cInstructions = 0;
14489 pStats->cExits = 0;
14490 pStats->cMaxExitDistance = 0;
14491 pStats->cReserved = 0;
14492
14493 /*
14494 * Initial decoder init w/ prefetch, then setup setjmp.
14495 */
14496 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14497 if (rcStrict == VINF_SUCCESS)
14498 {
14499#ifdef IEM_WITH_SETJMP
14500 jmp_buf JmpBuf;
14501 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14502 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14503 pVCpu->iem.s.cActiveMappings = 0;
14504 if ((rcStrict = setjmp(JmpBuf)) == 0)
14505#endif
14506 {
14507#ifdef IN_RING0
14508 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14509#endif
14510 uint32_t cInstructionSinceLastExit = 0;
14511
14512 /*
14513             * The run loop, limited by the caller specified instruction and exit counts.
14514 */
14515 PVM pVM = pVCpu->CTX_SUFF(pVM);
14516 for (;;)
14517 {
14518 /*
14519 * Log the state.
14520 */
14521#ifdef LOG_ENABLED
14522 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14523#endif
14524
14525 /*
14526 * Do the decoding and emulation.
14527 */
14528 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14529
14530 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14531 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14532
14533 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14534 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14535 {
14536 pStats->cExits += 1;
14537 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14538 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14539 cInstructionSinceLastExit = 0;
14540 }
14541
14542 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14543 {
14544 Assert(pVCpu->iem.s.cActiveMappings == 0);
14545 pVCpu->iem.s.cInstructions++;
14546 pStats->cInstructions++;
14547 cInstructionSinceLastExit++;
14548 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14549 {
14550 uint64_t fCpu = pVCpu->fLocalForcedActions
14551 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14552 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14553 | VMCPU_FF_TLB_FLUSH
14554 | VMCPU_FF_INHIBIT_INTERRUPTS
14555 | VMCPU_FF_BLOCK_NMIS
14556 | VMCPU_FF_UNHALT ));
14557
14558 if (RT_LIKELY( ( ( !fCpu
14559 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14560 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14561 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14562 || pStats->cInstructions < cMinInstructions))
14563 {
14564 if (pStats->cInstructions < cMaxInstructions)
14565 {
14566 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14567 {
14568#ifdef IN_RING0
14569 if ( !fCheckPreemptionPending
14570 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14571#endif
14572 {
14573 Assert(pVCpu->iem.s.cActiveMappings == 0);
14574 iemReInitDecoder(pVCpu);
14575 continue;
14576 }
14577#ifdef IN_RING0
14578 rcStrict = VINF_EM_RAW_INTERRUPT;
14579 break;
14580#endif
14581 }
14582 }
14583 }
14584 Assert(!(fCpu & VMCPU_FF_IEM));
14585 }
14586 Assert(pVCpu->iem.s.cActiveMappings == 0);
14587 }
14588 else if (pVCpu->iem.s.cActiveMappings > 0)
14589 iemMemRollback(pVCpu);
14590 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14591 break;
14592 }
14593 }
14594#ifdef IEM_WITH_SETJMP
14595 else
14596 {
14597 if (pVCpu->iem.s.cActiveMappings > 0)
14598 iemMemRollback(pVCpu);
14599 pVCpu->iem.s.cLongJumps++;
14600 }
14601 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14602#endif
14603
14604 /*
14605 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14606 */
14607 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14608 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14609 }
14610 else
14611 {
14612 if (pVCpu->iem.s.cActiveMappings > 0)
14613 iemMemRollback(pVCpu);
14614
14615#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14616 /*
14617 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14618 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14619 */
14620 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14621#endif
14622 }
14623
14624 /*
14625     * Log the result if it isn't VINF_SUCCESS.
14626 */
14627 if (rcStrict != VINF_SUCCESS)
14628 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14629 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14630 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14631 return rcStrict;
14632}
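/*
 * Usage sketch (the numbers are illustrative, not defaults): run at most 2048
 * instructions, execute at least 32 before honouring pending force flags, and
 * stop if 512 instructions go by without a single exit:
 *
 *      IEMEXECFOREXITSTATS Stats;
 *      VBOXSTRICTRC rcStrict = IEMExecForExits(pVCpu, 0, 32, 2048, 512, &Stats);
 */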
14633
14634
14635/**
14636 * Injects a trap, fault, abort, software interrupt or external interrupt.
14637 *
14638 * The parameter list matches TRPMQueryTrapAll pretty closely.
14639 *
14640 * @returns Strict VBox status code.
14641 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14642 * @param u8TrapNo The trap number.
14643 * @param enmType What type is it (trap/fault/abort), software
14644 * interrupt or hardware interrupt.
14645 * @param uErrCode The error code if applicable.
14646 * @param uCr2 The CR2 value if applicable.
14647 * @param cbInstr The instruction length (only relevant for
14648 * software interrupts).
14649 */
14650VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14651 uint8_t cbInstr)
14652{
14653 iemInitDecoder(pVCpu, false, false);
14654#ifdef DBGFTRACE_ENABLED
14655 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14656 u8TrapNo, enmType, uErrCode, uCr2);
14657#endif
14658
14659 uint32_t fFlags;
14660 switch (enmType)
14661 {
14662 case TRPM_HARDWARE_INT:
14663 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14664 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14665 uErrCode = uCr2 = 0;
14666 break;
14667
14668 case TRPM_SOFTWARE_INT:
14669 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14670 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14671 uErrCode = uCr2 = 0;
14672 break;
14673
14674 case TRPM_TRAP:
14675 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14676 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14677 if (u8TrapNo == X86_XCPT_PF)
14678 fFlags |= IEM_XCPT_FLAGS_CR2;
14679 switch (u8TrapNo)
14680 {
14681 case X86_XCPT_DF:
14682 case X86_XCPT_TS:
14683 case X86_XCPT_NP:
14684 case X86_XCPT_SS:
14685 case X86_XCPT_PF:
14686 case X86_XCPT_AC:
14687 case X86_XCPT_GP:
14688 fFlags |= IEM_XCPT_FLAGS_ERR;
14689 break;
14690 }
14691 break;
14692
14693 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14694 }
14695
14696 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14697
14698 if (pVCpu->iem.s.cActiveMappings > 0)
14699 iemMemRollback(pVCpu);
14700
14701 return rcStrict;
14702}
14703
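#if 0 /* Illustrative sketch only, not part of the original sources: a hypothetical
       * EMT-side helper using IEMInjectTrap to inject a #GP(0) directly, treating a
       * nested-guest #VMEXIT as success much like IEMInjectTrpmEvent below does.
       * The function name is made up. */
static VBOXSTRICTRC emExampleInjectGp0(PVMCPUCC pVCpu)
{
    /* #GP is a CPU exception (TRPM_TRAP) that takes an error code; no CR2, no instruction length. */
    VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_GP, TRPM_TRAP, 0 /*uErrCode*/, 0 /*uCr2*/, 0 /*cbInstr*/);
# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
    if (rcStrict == VINF_SVM_VMEXIT) /* the exception was intercepted by the nested hypervisor */
        rcStrict = VINF_SUCCESS;
# endif
    return rcStrict;
}
#endif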
14704
14705/**
14706 * Injects the active TRPM event.
14707 *
14708 * @returns Strict VBox status code.
14709 * @param pVCpu The cross context virtual CPU structure.
14710 */
14711VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
14712{
14713#ifndef IEM_IMPLEMENTS_TASKSWITCH
14714 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14715#else
14716 uint8_t u8TrapNo;
14717 TRPMEVENT enmType;
14718 uint32_t uErrCode;
14719 RTGCUINTPTR uCr2;
14720 uint8_t cbInstr;
14721 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
14722 if (RT_FAILURE(rc))
14723 return rc;
14724
14725 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
14726 * ICEBP \#DB injection as a special case. */
14727 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14728#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14729 if (rcStrict == VINF_SVM_VMEXIT)
14730 rcStrict = VINF_SUCCESS;
14731#endif
14732#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14733 if (rcStrict == VINF_VMX_VMEXIT)
14734 rcStrict = VINF_SUCCESS;
14735#endif
14736 /** @todo Are there any other codes that imply the event was successfully
14737 * delivered to the guest? See @bugref{6607}. */
14738 if ( rcStrict == VINF_SUCCESS
14739 || rcStrict == VINF_IEM_RAISED_XCPT)
14740 TRPMResetTrap(pVCpu);
14741
14742 return rcStrict;
14743#endif
14744}
14745
14746
14747VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14748{
14749 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14750 return VERR_NOT_IMPLEMENTED;
14751}
14752
14753
14754VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14755{
14756 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14757 return VERR_NOT_IMPLEMENTED;
14758}
14759
14760
14761#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14762/**
14763 * Executes an IRET instruction with the default operand size.
14764 *
14765 * This is for PATM.
14766 *
14767 * @returns VBox status code.
14768 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14769 * @param pCtxCore The register frame.
14770 */
14771VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
14772{
14773 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14774
14775 iemCtxCoreToCtx(pCtx, pCtxCore);
14776 iemInitDecoder(pVCpu);
14777 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14778 if (rcStrict == VINF_SUCCESS)
14779 iemCtxToCtxCore(pCtxCore, pCtx);
14780 else
14781 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14782 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14783 return rcStrict;
14784}
14785#endif
14786
14787
14788/**
14789 * Macro used by the IEMExec* method to check the given instruction length.
14790 *
14791 * Will return on failure!
14792 *
14793 * @param a_cbInstr The given instruction length.
14794 * @param a_cbMin The minimum length.
14795 */
14796#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14797 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14798 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14799
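/* Editorial note with a worked example (not from the original sources): the macro
 * checks a_cbMin <= a_cbInstr <= 15 with a single unsigned comparison.  If a_cbInstr
 * is smaller than a_cbMin, the subtraction wraps around to a huge unsigned value and
 * the test fails.  E.g. with a_cbMin=3: cbInstr=3..15 yields 0..12 <= 12 and passes,
 * cbInstr=2 yields 0xffffffff > 12 and asserts, cbInstr=16 yields 13 > 12 and asserts. */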
14800
14801/**
14802 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14803 *
14804 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14805 *
14806 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14808 * @param rcStrict The status code to fiddle.
14809 */
14810DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
14811{
14812 iemUninitExec(pVCpu);
14813 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14814}
14815
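#if 0 /* Illustrative sketch only, not part of the original sources: the general shape
       * shared by the IEMExecDecoded* interfaces that follow -- validate the decoded
       * bits, set up the execution state, run a single iemCImpl_* worker and return
       * the fiddled status.  Both the function and the iemCImpl_example worker are
       * hypothetical. */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedExample(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_example);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
#endif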
14816
14817/**
14818 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14819 *
14820 * This API ASSUMES that the caller has already verified that the guest code is
14821 * allowed to access the I/O port. (The I/O port is in the DX register in the
14822 * guest state.)
14823 *
14824 * @returns Strict VBox status code.
14825 * @param pVCpu The cross context virtual CPU structure.
14826 * @param cbValue The size of the I/O port access (1, 2, or 4).
14827 * @param enmAddrMode The addressing mode.
14828 * @param fRepPrefix Indicates whether a repeat prefix is used
14829 * (doesn't matter which for this instruction).
14830 * @param cbInstr The instruction length in bytes.
14831 * @param iEffSeg The effective segment address.
14832 * @param fIoChecked Whether the access to the I/O port has been
14833 * checked or not. It's typically checked in the
14834 * HM scenario.
14835 */
14836VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14837 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14838{
14839 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14840 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14841
14842 /*
14843 * State init.
14844 */
14845 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14846
14847 /*
14848 * Switch orgy for getting to the right handler.
14849 */
14850 VBOXSTRICTRC rcStrict;
14851 if (fRepPrefix)
14852 {
14853 switch (enmAddrMode)
14854 {
14855 case IEMMODE_16BIT:
14856 switch (cbValue)
14857 {
14858 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14859 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14860 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14861 default:
14862 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14863 }
14864 break;
14865
14866 case IEMMODE_32BIT:
14867 switch (cbValue)
14868 {
14869 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14870 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14871 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14872 default:
14873 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14874 }
14875 break;
14876
14877 case IEMMODE_64BIT:
14878 switch (cbValue)
14879 {
14880 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14881 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14882 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14883 default:
14884 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14885 }
14886 break;
14887
14888 default:
14889 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14890 }
14891 }
14892 else
14893 {
14894 switch (enmAddrMode)
14895 {
14896 case IEMMODE_16BIT:
14897 switch (cbValue)
14898 {
14899 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14900 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14901 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14902 default:
14903 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14904 }
14905 break;
14906
14907 case IEMMODE_32BIT:
14908 switch (cbValue)
14909 {
14910 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14911 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14912 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14913 default:
14914 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14915 }
14916 break;
14917
14918 case IEMMODE_64BIT:
14919 switch (cbValue)
14920 {
14921 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14922 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14923 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14924 default:
14925 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14926 }
14927 break;
14928
14929 default:
14930 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14931 }
14932 }
14933
14934 if (pVCpu->iem.s.cActiveMappings)
14935 iemMemRollback(pVCpu);
14936
14937 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14938}
14939
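#if 0 /* Illustrative sketch only, not part of the original sources: a hypothetical HM
       * exit handler replaying a "rep outsb" (byte writes from DS:xSI to the port in
       * DX) through the interface above, after it has done the I/O permission checks
       * itself.  IEMExecStringIoRead is used the same way for INS. */
static VBOXSTRICTRC hmExampleEmulateRepOutsb(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    return IEMExecStringIoWrite(pVCpu, 1 /*cbValue*/, IEMMODE_32BIT, true /*fRepPrefix*/,
                                cbInstr, X86_SREG_DS /*iEffSeg*/, true /*fIoChecked*/);
}
#endif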
14940
14941/**
14942 * Interface for HM and EM for executing string I/O IN (read) instructions.
14943 *
14944 * This API ASSUMES that the caller has already verified that the guest code is
14945 * allowed to access the I/O port. (The I/O port is in the DX register in the
14946 * guest state.)
14947 *
14948 * @returns Strict VBox status code.
14949 * @param pVCpu The cross context virtual CPU structure.
14950 * @param cbValue The size of the I/O port access (1, 2, or 4).
14951 * @param enmAddrMode The addressing mode.
14952 * @param fRepPrefix Indicates whether a repeat prefix is used
14953 * (doesn't matter which for this instruction).
14954 * @param cbInstr The instruction length in bytes.
14955 * @param fIoChecked Whether the access to the I/O port has been
14956 * checked or not. It's typically checked in the
14957 * HM scenario.
14958 */
14959VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14960 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14961{
14962 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14963
14964 /*
14965 * State init.
14966 */
14967 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14968
14969 /*
14970 * Switch orgy for getting to the right handler.
14971 */
14972 VBOXSTRICTRC rcStrict;
14973 if (fRepPrefix)
14974 {
14975 switch (enmAddrMode)
14976 {
14977 case IEMMODE_16BIT:
14978 switch (cbValue)
14979 {
14980 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
14981 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
14982 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
14983 default:
14984 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14985 }
14986 break;
14987
14988 case IEMMODE_32BIT:
14989 switch (cbValue)
14990 {
14991 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
14992 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
14993 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
14994 default:
14995 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14996 }
14997 break;
14998
14999 case IEMMODE_64BIT:
15000 switch (cbValue)
15001 {
15002 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15003 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15004 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15005 default:
15006 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15007 }
15008 break;
15009
15010 default:
15011 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15012 }
15013 }
15014 else
15015 {
15016 switch (enmAddrMode)
15017 {
15018 case IEMMODE_16BIT:
15019 switch (cbValue)
15020 {
15021 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15022 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15023 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15024 default:
15025 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15026 }
15027 break;
15028
15029 case IEMMODE_32BIT:
15030 switch (cbValue)
15031 {
15032 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15033 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15034 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15035 default:
15036 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15037 }
15038 break;
15039
15040 case IEMMODE_64BIT:
15041 switch (cbValue)
15042 {
15043 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15044 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15045 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15046 default:
15047 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15048 }
15049 break;
15050
15051 default:
15052 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15053 }
15054 }
15055
15056 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15057 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15058}
15059
15060
15061/**
15062 * Interface for rawmode to execute an OUT (write) instruction.
15063 *
15064 * @returns Strict VBox status code.
15065 * @param pVCpu The cross context virtual CPU structure.
15066 * @param cbInstr The instruction length in bytes.
15067 * @param   u16Port     The port to write to.
15068 * @param fImm Whether the port is specified using an immediate operand or
15069 * using the implicit DX register.
15070 * @param cbReg The register size.
15071 *
15072 * @remarks In ring-0 not all of the state needs to be synced in.
15073 */
15074VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15075{
15076 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15077 Assert(cbReg <= 4 && cbReg != 3);
15078
15079 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15080 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15081 Assert(!pVCpu->iem.s.cActiveMappings);
15082 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15083}
15084
15085
15086/**
15087 * Interface for rawmode to execute an IN (read) instruction.
15088 *
15089 * @returns Strict VBox status code.
15090 * @param pVCpu The cross context virtual CPU structure.
15091 * @param cbInstr The instruction length in bytes.
15092 * @param u16Port The port to read.
15093 * @param fImm Whether the port is specified using an immediate operand or
15094 * using the implicit DX.
15095 * @param cbReg The register size.
15096 */
15097VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15098{
15099 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15100 Assert(cbReg <= 4 && cbReg != 3);
15101
15102 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15103 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15104 Assert(!pVCpu->iem.s.cActiveMappings);
15105 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15106}
15107
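#if 0 /* Illustrative sketch only, not part of the original sources: a hypothetical
       * port-I/O exit handler replaying a decoded "out dx, al" via IEMExecDecodedOut.
       * The port, immediate flag and access size would normally come from the
       * decoded exit information. */
static VBOXSTRICTRC hmExampleEmulateOutDxAl(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port)
{
    return IEMExecDecodedOut(pVCpu, cbInstr, u16Port, false /*fImm: port is in DX*/, 1 /*cbReg*/);
}
#endif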
15108
15109/**
15110 * Interface for HM and EM to write to a CRx register.
15111 *
15112 * @returns Strict VBox status code.
15113 * @param pVCpu The cross context virtual CPU structure.
15114 * @param cbInstr The instruction length in bytes.
15115 * @param iCrReg The control register number (destination).
15116 * @param iGReg The general purpose register number (source).
15117 *
15118 * @remarks In ring-0 not all of the state needs to be synced in.
15119 */
15120VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15121{
15122 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15123 Assert(iCrReg < 16);
15124 Assert(iGReg < 16);
15125
15126 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15127 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15128 Assert(!pVCpu->iem.s.cActiveMappings);
15129 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15130}
15131
15132
15133/**
15134 * Interface for HM and EM to read from a CRx register.
15135 *
15136 * @returns Strict VBox status code.
15137 * @param pVCpu The cross context virtual CPU structure.
15138 * @param cbInstr The instruction length in bytes.
15139 * @param iGReg The general purpose register number (destination).
15140 * @param iCrReg The control register number (source).
15141 *
15142 * @remarks In ring-0 not all of the state needs to be synced in.
15143 */
15144VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15145{
15146 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15147 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15148 | CPUMCTX_EXTRN_APIC_TPR);
15149 Assert(iCrReg < 16);
15150 Assert(iGReg < 16);
15151
15152 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15153 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15154 Assert(!pVCpu->iem.s.cActiveMappings);
15155 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15156}
15157
15158
15159/**
15160 * Interface for HM and EM to clear the CR0[TS] bit.
15161 *
15162 * @returns Strict VBox status code.
15163 * @param pVCpu The cross context virtual CPU structure.
15164 * @param cbInstr The instruction length in bytes.
15165 *
15166 * @remarks In ring-0 not all of the state needs to be synced in.
15167 */
15168VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
15169{
15170 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15171
15172 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15173 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15174 Assert(!pVCpu->iem.s.cActiveMappings);
15175 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15176}
15177
15178
15179/**
15180 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15181 *
15182 * @returns Strict VBox status code.
15183 * @param pVCpu The cross context virtual CPU structure.
15184 * @param cbInstr The instruction length in bytes.
15185 * @param uValue The value to load into CR0.
15186 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15187 * memory operand. Otherwise pass NIL_RTGCPTR.
15188 *
15189 * @remarks In ring-0 not all of the state needs to be synced in.
15190 */
15191VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15192{
15193 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15194
15195 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15196 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15197 Assert(!pVCpu->iem.s.cActiveMappings);
15198 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15199}
15200
15201
15202/**
15203 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15204 *
15205 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15206 *
15207 * @returns Strict VBox status code.
15208 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15209 * @param cbInstr The instruction length in bytes.
15210 * @remarks In ring-0 not all of the state needs to be synced in.
15211 * @thread EMT(pVCpu)
15212 */
15213VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
15214{
15215 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15216
15217 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15218 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15219 Assert(!pVCpu->iem.s.cActiveMappings);
15220 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15221}
15222
15223
15224/**
15225 * Interface for HM and EM to emulate the WBINVD instruction.
15226 *
15227 * @returns Strict VBox status code.
15228 * @param pVCpu The cross context virtual CPU structure.
15229 * @param cbInstr The instruction length in bytes.
15230 *
15231 * @remarks In ring-0 not all of the state needs to be synced in.
15232 */
15233VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15234{
15235 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15236
15237 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15238 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15239 Assert(!pVCpu->iem.s.cActiveMappings);
15240 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15241}
15242
15243
15244/**
15245 * Interface for HM and EM to emulate the INVD instruction.
15246 *
15247 * @returns Strict VBox status code.
15248 * @param pVCpu The cross context virtual CPU structure.
15249 * @param cbInstr The instruction length in bytes.
15250 *
15251 * @remarks In ring-0 not all of the state needs to be synced in.
15252 */
15253VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15254{
15255 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15256
15257 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15258 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15259 Assert(!pVCpu->iem.s.cActiveMappings);
15260 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15261}
15262
15263
15264/**
15265 * Interface for HM and EM to emulate the INVLPG instruction.
15266 *
15267 * @returns Strict VBox status code.
15268 * @retval VINF_PGM_SYNC_CR3
15269 *
15270 * @param pVCpu The cross context virtual CPU structure.
15271 * @param cbInstr The instruction length in bytes.
15272 * @param GCPtrPage The effective address of the page to invalidate.
15273 *
15274 * @remarks In ring-0 not all of the state needs to be synced in.
15275 */
15276VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15277{
15278 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15279
15280 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15281 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15282 Assert(!pVCpu->iem.s.cActiveMappings);
15283 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15284}
15285
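#if 0 /* Illustrative sketch only, not part of the original sources: a hypothetical
       * caller of IEMExecDecodedInvlpg reacting to the documented VINF_PGM_SYNC_CR3
       * informational status, which is not an error. */
static VBOXSTRICTRC hmExampleInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
{
    VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpg(pVCpu, cbInstr, GCPtrPage);
    if (rcStrict == VINF_PGM_SYNC_CR3)
        Log(("Example: INVLPG emulation requested a full CR3 sync\n"));
    return rcStrict;
}
#endif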
15286
15287/**
15288 * Interface for HM and EM to emulate the INVPCID instruction.
15289 *
15290 * @returns Strict VBox status code.
15291 * @retval VINF_PGM_SYNC_CR3
15292 *
15293 * @param pVCpu The cross context virtual CPU structure.
15294 * @param cbInstr The instruction length in bytes.
15295 * @param iEffSeg The effective segment register.
15296 * @param GCPtrDesc The effective address of the INVPCID descriptor.
15297 * @param uType The invalidation type.
15298 *
15299 * @remarks In ring-0 not all of the state needs to be synced in.
15300 */
15301VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
15302 uint64_t uType)
15303{
15304 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
15305
15306 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15307 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
15308 Assert(!pVCpu->iem.s.cActiveMappings);
15309 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15310}
15311
15312
15313/**
15314 * Interface for HM and EM to emulate the CPUID instruction.
15315 *
15316 * @returns Strict VBox status code.
15317 *
15318 * @param pVCpu The cross context virtual CPU structure.
15319 * @param cbInstr The instruction length in bytes.
15320 *
15321 * @remarks Not all of the state needs to be synced in; the usual set plus RAX and RCX.
15322 */
15323VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
15324{
15325 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15326 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15327
15328 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15329 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15330 Assert(!pVCpu->iem.s.cActiveMappings);
15331 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15332}
15333
15334
15335/**
15336 * Interface for HM and EM to emulate the RDPMC instruction.
15337 *
15338 * @returns Strict VBox status code.
15339 *
15340 * @param pVCpu The cross context virtual CPU structure.
15341 * @param cbInstr The instruction length in bytes.
15342 *
15343 * @remarks Not all of the state needs to be synced in.
15344 */
15345VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
15346{
15347 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15348 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15349
15350 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15351 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15352 Assert(!pVCpu->iem.s.cActiveMappings);
15353 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15354}
15355
15356
15357/**
15358 * Interface for HM and EM to emulate the RDTSC instruction.
15359 *
15360 * @returns Strict VBox status code.
15361 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15362 *
15363 * @param pVCpu The cross context virtual CPU structure.
15364 * @param cbInstr The instruction length in bytes.
15365 *
15366 * @remarks Not all of the state needs to be synced in.
15367 */
15368VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
15369{
15370 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15371 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15372
15373 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15374 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15375 Assert(!pVCpu->iem.s.cActiveMappings);
15376 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15377}
15378
15379
15380/**
15381 * Interface for HM and EM to emulate the RDTSCP instruction.
15382 *
15383 * @returns Strict VBox status code.
15384 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15385 *
15386 * @param pVCpu The cross context virtual CPU structure.
15387 * @param cbInstr The instruction length in bytes.
15388 *
15389 * @remarks Not all of the state needs to be synced in. Recommended
15390 *          to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15391 */
15392VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
15393{
15394 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15395 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15396
15397 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15398 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15399 Assert(!pVCpu->iem.s.cActiveMappings);
15400 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15401}
15402
15403
15404/**
15405 * Interface for HM and EM to emulate the RDMSR instruction.
15406 *
15407 * @returns Strict VBox status code.
15408 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15409 *
15410 * @param pVCpu The cross context virtual CPU structure.
15411 * @param cbInstr The instruction length in bytes.
15412 *
15413 * @remarks Not all of the state needs to be synced in. Requires RCX and
15414 * (currently) all MSRs.
15415 */
15416VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15417{
15418 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15419 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15420
15421 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15422 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15423 Assert(!pVCpu->iem.s.cActiveMappings);
15424 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15425}
15426
15427
15428/**
15429 * Interface for HM and EM to emulate the WRMSR instruction.
15430 *
15431 * @returns Strict VBox status code.
15432 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15433 *
15434 * @param pVCpu The cross context virtual CPU structure.
15435 * @param cbInstr The instruction length in bytes.
15436 *
15437 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15438 * and (currently) all MSRs.
15439 */
15440VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15441{
15442 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15443 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15444 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15445
15446 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15447 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15448 Assert(!pVCpu->iem.s.cActiveMappings);
15449 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15450}
15451
15452
15453/**
15454 * Interface for HM and EM to emulate the MONITOR instruction.
15455 *
15456 * @returns Strict VBox status code.
15457 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15458 *
15459 * @param pVCpu The cross context virtual CPU structure.
15460 * @param cbInstr The instruction length in bytes.
15461 *
15462 * @remarks Not all of the state needs to be synced in.
15463 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15464 * are used.
15465 */
15466VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
15467{
15468 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15469 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15470
15471 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15472 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15473 Assert(!pVCpu->iem.s.cActiveMappings);
15474 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15475}
15476
15477
15478/**
15479 * Interface for HM and EM to emulate the MWAIT instruction.
15480 *
15481 * @returns Strict VBox status code.
15482 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15483 *
15484 * @param pVCpu The cross context virtual CPU structure.
15485 * @param cbInstr The instruction length in bytes.
15486 *
15487 * @remarks Not all of the state needs to be synced in.
15488 */
15489VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
15490{
15491 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15492 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
15493
15494 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15495 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15496 Assert(!pVCpu->iem.s.cActiveMappings);
15497 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15498}
15499
15500
15501/**
15502 * Interface for HM and EM to emulate the HLT instruction.
15503 *
15504 * @returns Strict VBox status code.
15505 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15506 *
15507 * @param pVCpu The cross context virtual CPU structure.
15508 * @param cbInstr The instruction length in bytes.
15509 *
15510 * @remarks Not all of the state needs to be synced in.
15511 */
15512VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
15513{
15514 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15515
15516 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15517 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15518 Assert(!pVCpu->iem.s.cActiveMappings);
15519 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15520}
15521
15522
15523/**
15524 * Checks if IEM is in the process of delivering an event (interrupt or
15525 * exception).
15526 *
15527 * @returns true if we're in the process of raising an interrupt or exception,
15528 * false otherwise.
15529 * @param pVCpu The cross context virtual CPU structure.
15530 * @param puVector Where to store the vector associated with the
15531 * currently delivered event, optional.
15532 * @param   pfFlags     Where to store the event delivery flags (see
15533 * IEM_XCPT_FLAGS_XXX), optional.
15534 * @param puErr Where to store the error code associated with the
15535 * event, optional.
15536 * @param puCr2 Where to store the CR2 associated with the event,
15537 * optional.
15538 * @remarks The caller should check the flags to determine if the error code and
15539 * CR2 are valid for the event.
15540 */
15541VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15542{
15543 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15544 if (fRaisingXcpt)
15545 {
15546 if (puVector)
15547 *puVector = pVCpu->iem.s.uCurXcpt;
15548 if (pfFlags)
15549 *pfFlags = pVCpu->iem.s.fCurXcpt;
15550 if (puErr)
15551 *puErr = pVCpu->iem.s.uCurXcptErr;
15552 if (puCr2)
15553 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15554 }
15555 return fRaisingXcpt;
15556}
15557
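#if 0 /* Illustrative sketch only, not part of the original sources: a hypothetical
       * caller honouring the remark above -- the error code and CR2 are only
       * meaningful when the corresponding IEM_XCPT_FLAGS_XXX bits are set. */
static void exampleLogCurrentXcpt(PVMCPUCC pVCpu)
{
    uint8_t  uVector = 0;
    uint32_t fFlags  = 0;
    uint32_t uErr    = 0;
    uint64_t uCr2    = 0;
    if (IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2))
    {
        Log(("Delivering vector %#x (fFlags=%#x)\n", uVector, fFlags));
        if (fFlags & IEM_XCPT_FLAGS_ERR)
            Log(("... error code %#x\n", uErr));
        if (fFlags & IEM_XCPT_FLAGS_CR2)
            Log(("... cr2 %#RX64\n", uCr2));
    }
}
#endif
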
15558#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15559
15560/**
15561 * Interface for HM and EM to emulate the CLGI instruction.
15562 *
15563 * @returns Strict VBox status code.
15564 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15565 * @param cbInstr The instruction length in bytes.
15566 * @thread EMT(pVCpu)
15567 */
15568VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15569{
15570 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15571
15572 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15573 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15574 Assert(!pVCpu->iem.s.cActiveMappings);
15575 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15576}
15577
15578
15579/**
15580 * Interface for HM and EM to emulate the STGI instruction.
15581 *
15582 * @returns Strict VBox status code.
15583 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15584 * @param cbInstr The instruction length in bytes.
15585 * @thread EMT(pVCpu)
15586 */
15587VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15588{
15589 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15590
15591 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15592 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15593 Assert(!pVCpu->iem.s.cActiveMappings);
15594 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15595}
15596
15597
15598/**
15599 * Interface for HM and EM to emulate the VMLOAD instruction.
15600 *
15601 * @returns Strict VBox status code.
15602 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15603 * @param cbInstr The instruction length in bytes.
15604 * @thread EMT(pVCpu)
15605 */
15606VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
15607{
15608 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15609
15610 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15611 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15612 Assert(!pVCpu->iem.s.cActiveMappings);
15613 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15614}
15615
15616
15617/**
15618 * Interface for HM and EM to emulate the VMSAVE instruction.
15619 *
15620 * @returns Strict VBox status code.
15621 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15622 * @param cbInstr The instruction length in bytes.
15623 * @thread EMT(pVCpu)
15624 */
15625VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
15626{
15627 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15628
15629 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15630 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15631 Assert(!pVCpu->iem.s.cActiveMappings);
15632 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15633}
15634
15635
15636/**
15637 * Interface for HM and EM to emulate the INVLPGA instruction.
15638 *
15639 * @returns Strict VBox status code.
15640 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15641 * @param cbInstr The instruction length in bytes.
15642 * @thread EMT(pVCpu)
15643 */
15644VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
15645{
15646 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15647
15648 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15649 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15650 Assert(!pVCpu->iem.s.cActiveMappings);
15651 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15652}
15653
15654
15655/**
15656 * Interface for HM and EM to emulate the VMRUN instruction.
15657 *
15658 * @returns Strict VBox status code.
15659 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15660 * @param cbInstr The instruction length in bytes.
15661 * @thread EMT(pVCpu)
15662 */
15663VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
15664{
15665 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15666 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15667
15668 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15669 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15670 Assert(!pVCpu->iem.s.cActiveMappings);
15671 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15672}
15673
15674
15675/**
15676 * Interface for HM and EM to emulate \#VMEXIT.
15677 *
15678 * @returns Strict VBox status code.
15679 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15680 * @param uExitCode The exit code.
15681 * @param uExitInfo1 The exit info. 1 field.
15682 * @param uExitInfo2 The exit info. 2 field.
15683 * @thread EMT(pVCpu)
15684 */
15685VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15686{
15687 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15688 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15689 if (pVCpu->iem.s.cActiveMappings)
15690 iemMemRollback(pVCpu);
15691 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15692}
15693
15694#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15695
15696#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15697
15698/**
15699 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
15700 *
15701 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
15702 * are performed. Bounds checks are done in strict builds only.
15703 *
15704 * @param pVmcs Pointer to the virtual VMCS.
15705 * @param u64VmcsField The VMCS field.
15706 * @param pu64Dst Where to store the VMCS value.
15707 *
15708 * @remarks May be called with interrupts disabled.
15709 * @todo This should probably be moved to CPUM someday.
15710 */
15711VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
15712{
15713 AssertPtr(pVmcs);
15714 AssertPtr(pu64Dst);
15715 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
15716}
15717
15718
15719/**
15720 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
15721 *
15722 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
15723 * are performed. Bounds checks are done in strict builds only.
15724 *
15725 * @param pVmcs Pointer to the virtual VMCS.
15726 * @param u64VmcsField The VMCS field.
15727 * @param u64Val The value to write.
15728 *
15729 * @remarks May be called with interrupts disabled.
15730 * @todo This should probably be moved to CPUM someday.
15731 */
15732VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
15733{
15734 AssertPtr(pVmcs);
15735 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
15736}
15737
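#if 0 /* Illustrative sketch only, not part of the original sources: round-tripping a
       * nested-guest VMCS field through the two raw accessors above.  The caller is
       * assumed to pass a valid field encoding; no VMREAD/VMWRITE checks are done. */
static uint64_t exampleVmcsFieldRoundTrip(PVMXVVMCS pVmcs, uint64_t u64VmcsField)
{
    uint64_t u64Val = 0;
    IEMReadVmxVmcsField(pVmcs, u64VmcsField, &u64Val);
    IEMWriteVmxVmcsField(pVmcs, u64VmcsField, u64Val); /* write the same value back */
    return u64Val;
}
#endif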
15738
15739/**
15740 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15741 *
15742 * @returns Strict VBox status code.
15743 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15744 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15745 * the x2APIC device.
15746 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15747 *
15748 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15749 * @param   idMsr       The MSR being read or written.
15750 * @param pu64Value Pointer to the value being written or where to store the
15751 * value being read.
15752 * @param fWrite Whether this is an MSR write or read access.
15753 * @thread EMT(pVCpu)
15754 */
15755VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15756{
15757 Assert(pu64Value);
15758
15759 VBOXSTRICTRC rcStrict;
15760 if (fWrite)
15761 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15762 else
15763 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15764 Assert(!pVCpu->iem.s.cActiveMappings);
15765 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15766
15767}
15768
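#if 0 /* Illustrative sketch only, not part of the original sources: a hypothetical MSR
       * exit handler dispatching on the documented return codes of
       * IEMExecVmxVirtApicAccessMsr for a read access. */
static VBOXSTRICTRC hmExampleVirtApicMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value)
{
    VBOXSTRICTRC rcStrict = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, pu64Value, false /*fWrite*/);
    if (rcStrict == VINF_VMX_MODIFIES_BEHAVIOR)
        return VINF_SUCCESS;            /* the read was satisfied by the virtual-APIC page */
    if (rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE)
        return rcStrict;                /* hand the access to the regular x2APIC MSR code */
    return rcStrict;                    /* e.g. the documented out-of-range status: caller must raise #GP(0) */
}
#endif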
15769
15770/**
15771 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15772 *
15773 * @returns Strict VBox status code.
15774 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15775 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15776 *
15777 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15778 * @param pExitInfo Pointer to the VM-exit information.
15779 * @param pExitEventInfo Pointer to the VM-exit event information.
15780 * @thread EMT(pVCpu)
15781 */
15782VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15783{
15784 Assert(pExitInfo);
15785 Assert(pExitEventInfo);
15786 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15787 Assert(!pVCpu->iem.s.cActiveMappings);
15788 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15789
15790}
15791
15792
15793/**
15794 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15795 * VM-exit.
15796 *
15797 * @returns Strict VBox status code.
15798 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15799 * @thread EMT(pVCpu)
15800 */
15801VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu)
15802{
15803 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15804 Assert(!pVCpu->iem.s.cActiveMappings);
15805 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15806}
15807
15808
15809/**
15810 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15811 *
15812 * @returns Strict VBox status code.
15813 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15814 * @thread EMT(pVCpu)
15815 */
15816VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
15817{
15818 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15819 Assert(!pVCpu->iem.s.cActiveMappings);
15820 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15821}
15822
15823
15824/**
15825 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15826 *
15827 * @returns Strict VBox status code.
15828 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15829 * @param uVector The external interrupt vector (pass 0 if the external
15830 * interrupt is still pending).
15831 * @param fIntPending Whether the external interrupt is pending or
15832 *                      acknowledged in the interrupt controller.
15833 * @thread EMT(pVCpu)
15834 */
15835VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
15836{
15837 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15838 Assert(!pVCpu->iem.s.cActiveMappings);
15839 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15840}
15841
15842
15843/**
15844 * Interface for HM and EM to emulate VM-exit due to exceptions.
15845 *
15846 * Exceptions include NMIs, software exceptions (those generated by INT3 or
15847 * INTO) and privileged software exceptions (those generated by INT1/ICEBP).
15848 *
15849 * @returns Strict VBox status code.
15850 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15851 * @param pExitInfo Pointer to the VM-exit information.
15852 * @param pExitEventInfo Pointer to the VM-exit event information.
15853 * @thread EMT(pVCpu)
15854 */
15855VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15856{
15857 Assert(pExitInfo);
15858 Assert(pExitEventInfo);
15859 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15860 Assert(!pVCpu->iem.s.cActiveMappings);
15861 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15862}
15863
15864
15865/**
15866 * Interface for HM and EM to emulate VM-exit due to NMIs.
15867 *
15868 * @returns Strict VBox status code.
15869 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15870 * @thread EMT(pVCpu)
15871 */
15872VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu)
15873{
15874 VMXVEXITINFO ExitInfo;
15875 RT_ZERO(ExitInfo);
15876 ExitInfo.uReason = VMX_EXIT_XCPT_OR_NMI;
15877
15878 VMXVEXITEVENTINFO ExitEventInfo;
15879 RT_ZERO(ExitEventInfo);
15880 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
15881 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
15882 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
15883
15884 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
15885 Assert(!pVCpu->iem.s.cActiveMappings);
15886 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15887}
15888
15889
15890/**
15891 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
15892 *
15893 * @returns Strict VBox status code.
15894 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15895 * @thread EMT(pVCpu)
15896 */
15897VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu)
15898{
15899 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
15900 Assert(!pVCpu->iem.s.cActiveMappings);
15901 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15902}
15903
15904
15905/**
15906 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15907 *
15908 * @returns Strict VBox status code.
15909 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15910 * @param uVector The SIPI vector.
15911 * @thread EMT(pVCpu)
15912 */
15913VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector)
15914{
15915 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
15916 Assert(!pVCpu->iem.s.cActiveMappings);
15917 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15918}
15919
15920
15921/**
15922 * Interface for HM and EM to emulate a VM-exit.
15923 *
15924 * If a specialized version of a VM-exit handler exists, that must be used instead.
15925 *
15926 * @returns Strict VBox status code.
15927 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15928 * @param uExitReason The VM-exit reason.
15929 * @param u64ExitQual The Exit qualification.
15930 * @thread EMT(pVCpu)
15931 */
15932VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
15933{
15934 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
15935 Assert(!pVCpu->iem.s.cActiveMappings);
15936 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15937}
15938
15939
15940/**
15941 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15942 *
15943 * This is meant to be used for those instructions for which VMX provides additional
15944 * decoding information beyond just the instruction length!
15945 *
15946 * @returns Strict VBox status code.
15947 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15948 * @param pExitInfo Pointer to the VM-exit information.
15949 * @thread EMT(pVCpu)
15950 */
15951VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15952{
15953 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
15954 Assert(!pVCpu->iem.s.cActiveMappings);
15955 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15956}
15957
15958
15959/**
15960 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15961 *
15962 * This is meant to be used for those instructions for which VMX provides only the
15963 * instruction length.
15964 *
15965 * @returns Strict VBox status code.
15966 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15967 * @param   uExitReason The VM-exit reason.
15968 * @param cbInstr The instruction length in bytes.
15969 * @thread EMT(pVCpu)
15970 */
15971VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
15972{
15973 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
15974 Assert(!pVCpu->iem.s.cActiveMappings);
15975 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15976}
15977
15978
15979/**
15980 * Interface for HM and EM to emulate a trap-like VM-exit (MTF, APIC-write,
15981 * Virtualized-EOI, TPR-below threshold).
15982 *
15983 * @returns Strict VBox status code.
15984 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15985 * @param pExitInfo Pointer to the VM-exit information.
15986 * @thread EMT(pVCpu)
15987 */
15988VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15989{
15990 Assert(pExitInfo);
15991 VBOXSTRICTRC rcStrict = iemVmxVmexitTrapLikeWithInfo(pVCpu, pExitInfo);
15992 Assert(!pVCpu->iem.s.cActiveMappings);
15993 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15994}
15995
15996
15997/**
15998 * Interface for HM and EM to emulate a VM-exit due to a task switch.
15999 *
16000 * @returns Strict VBox status code.
16001 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16002 * @param pExitInfo Pointer to the VM-exit information.
16003 * @param pExitEventInfo Pointer to the VM-exit event information.
16004 * @thread EMT(pVCpu)
16005 */
16006VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16007{
16008 Assert(pExitInfo);
16009 Assert(pExitEventInfo);
16010 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
16011 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16012 Assert(!pVCpu->iem.s.cActiveMappings);
16013 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16014}
16015
16016
16017/**
16018 * Interface for HM and EM to emulate the VMREAD instruction.
16019 *
16020 * @returns Strict VBox status code.
16021 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16022 * @param pExitInfo Pointer to the VM-exit information.
16023 * @thread EMT(pVCpu)
16024 */
16025VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16026{
16027 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16028 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16029 Assert(pExitInfo);
16030
16031 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16032
16033 VBOXSTRICTRC rcStrict;
16034 uint8_t const cbInstr = pExitInfo->cbInstr;
16035 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
16036 uint64_t const u64FieldEnc = fIs64BitMode
16037 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16038 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16039 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16040 {
16041 if (fIs64BitMode)
16042 {
16043 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16044 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
16045 }
16046 else
16047 {
16048 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16049 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
16050 }
16051 }
16052 else
16053 {
16054 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
16055 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16056 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
16057 }
16058 Assert(!pVCpu->iem.s.cActiveMappings);
16059 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16060}
16061
16062
16063/**
16064 * Interface for HM and EM to emulate the VMWRITE instruction.
16065 *
16066 * @returns Strict VBox status code.
16067 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16068 * @param pExitInfo Pointer to the VM-exit information.
16069 * @thread EMT(pVCpu)
16070 */
16071VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16072{
16073 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16074 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);
16075 Assert(pExitInfo);
16076
16077 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16078
16079 uint64_t u64Val;
16080 uint8_t iEffSeg;
16081 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16082 {
16083 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16084 iEffSeg = UINT8_MAX;
16085 }
16086 else
16087 {
16088 u64Val = pExitInfo->GCPtrEffAddr;
16089 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16090 }
16091 uint8_t const cbInstr = pExitInfo->cbInstr;
16092 uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16093 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16094 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16095 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
16096 Assert(!pVCpu->iem.s.cActiveMappings);
16097 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16098}


/**
 * Interface for HM and EM to emulate the VMPTRLD instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr   = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMPTRST instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr   = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMCLEAR instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg   = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr   = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
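

/*
 * Informal usage sketch (illustration only): VMPTRLD, VMPTRST and VMCLEAR all
 * take a single memory operand (holding or receiving a VMCS physical address),
 * so the three wrappers above are driven identically.  GCPtrEffOperand and
 * cbInstr are placeholders for what the caller decoded from the VM-exit:
 *
 *      VMXVEXITINFO ExitInfo;
 *      RT_ZERO(ExitInfo);
 *      ExitInfo.cbInstr                    = cbInstr;          // From the VM-exit instruction-length field.
 *      ExitInfo.GCPtrEffAddr               = GCPtrEffOperand;  // Guest-linear address of the operand.
 *      ExitInfo.InstrInfo.VmxXsave.iSegReg = X86_SREG_DS;
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedVmclear(pVCpu, &ExitInfo);
 */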


/**
 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @param   uInstrId    The instruction ID (VMXINSTRID_VMLAUNCH or
 *                      VMXINSTRID_VMRESUME).
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}
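

/*
 * Informal usage sketch (illustration only): VMLAUNCH and VMRESUME take no
 * operands, so no VMXVEXITINFO is needed; the caller only supplies the
 * instruction length (3 bytes for both encodings, absent prefixes) and which
 * of the two instructions caused the VM-exit:
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, 3 /*cbInstr*/, VMXINSTRID_VMLAUNCH);
 */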


/**
 * Interface for HM and EM to emulate the VMXON instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const iEffSeg    = pExitInfo->InstrInfo.VmxXsave.iSegReg;
    uint8_t const cbInstr    = pExitInfo->cbInstr;
    RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
    VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the VMXOFF instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   cbInstr     The instruction length in bytes.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr)
{
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);
    VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * Interface for HM and EM to emulate the INVVPID instruction.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   pExitInfo   Pointer to the VM-exit information.
 * @thread  EMT(pVCpu)
 */
VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
{
    Assert(pExitInfo);
    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_HM_VMX_MASK);

    iemInitExec(pVCpu, false /*fBypassHandlers*/);

    uint8_t const  iEffSeg          = pExitInfo->InstrInfo.Inv.iSegReg;
    uint8_t const  cbInstr          = pExitInfo->cbInstr;
    RTGCPTR const  GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
    uint64_t const u64InvvpidType   = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
                                    ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
                                    : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
    VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
    Assert(!pVCpu->iem.s.cActiveMappings);
    return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
}


/**
 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
 *
 * @remarks The @a pvUser argument is currently unused.
 */
PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
                                                           void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
                                                           PGMACCESSORIGIN enmOrigin, void *pvUser)
{
    RT_NOREF3(pvPhys, enmOrigin, pvUser);

    RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
    if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
    {
        Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
        Assert(CPUMGetGuestVmxApicAccessPageAddr(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);

        /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
         *        Currently they will go through as read accesses. */
        uint32_t const fAccess   = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
        uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
        VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
        if (RT_FAILURE(rcStrict))
            return rcStrict;

        /* Any access on this APIC-access page has been handled, caller should not carry out the access. */
        return VINF_SUCCESS;
    }

    Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
    int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
    if (RT_FAILURE(rc))
        return rc;

    /* Instruct the caller of this handler to perform the read/write as normal memory. */
    return VINF_PGM_HANDLER_DO_DEFAULT;
}

#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */

#ifdef IN_RING3

/**
 * Handles the unlikely and probably fatal merge cases.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
                                                          unsigned iMemMap, PVMCPUCC pVCpu)
{
    if (RT_FAILURE_NP(rcStrict))
        return rcStrict;

    if (RT_FAILURE_NP(rcStrictCommit))
        return rcStrictCommit;

    if (rcStrict == rcStrictCommit)
        return rcStrictCommit;

    AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
                           VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
                           pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
                           pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
    return VERR_IOM_FF_STATUS_IPE;
}


/**
 * Helper for IEMR3ProcessForceFlag.
 *
 * @returns Merged status code.
 * @param   rcStrict        Current EM status code.
 * @param   rcStrictCommit  The IOM I/O or MMIO write commit status to merge
 *                          with @a rcStrict.
 * @param   iMemMap         The memory mapping index. For error reporting only.
 * @param   pVCpu           The cross context virtual CPU structure of the calling
 *                          thread, for error reporting only.
 */
DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
{
    /* Simple. */
    if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
        return rcStrictCommit;

    if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
        return rcStrict;

    /* EM scheduling status codes. */
    if (RT_LIKELY(   rcStrict >= VINF_EM_FIRST
                  && rcStrict <= VINF_EM_LAST))
    {
        if (RT_LIKELY(   rcStrictCommit >= VINF_EM_FIRST
                      && rcStrictCommit <= VINF_EM_LAST))
            return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
    }

    /* Unlikely */
    return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
}
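

/*
 * Informal examples of the merge semantics above (illustration only): a
 * pending informational status on either side is preferred over plain
 * success, and when both sides carry an EM scheduling code the numerically
 * smaller one - by EM convention the higher priority one - wins:
 *
 *      iemR3MergeStatus(VINF_SUCCESS,       VINF_EM_RESCHEDULE, 0, pVCpu)  ->  VINF_EM_RESCHEDULE
 *      iemR3MergeStatus(VINF_EM_RESCHEDULE, VINF_SUCCESS,       0, pVCpu)  ->  VINF_EM_RESCHEDULE
 *      iemR3MergeStatus(VINF_EM_HALT,       VINF_EM_RESCHEDULE, 0, pVCpu)  ->  the smaller of the two codes
 */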


/**
 * Called by force-flag handling code when VMCPU_FF_IEM is set.
 *
 * @returns Merge between @a rcStrict and what the commit operation returned.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
 * @param   rcStrict    The status code returned by ring-0 or raw-mode.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
{
    /*
     * Reset the pending commit.
     */
    AssertMsg(  (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
              & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
              ("%#x %#x %#x\n",
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);

    /*
     * Commit the pending bounce buffers (usually just one).
     */
    unsigned cBufs = 0;
    unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
    while (iMemMap-- > 0)
        if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
        {
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
            Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
            Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);

            uint16_t const cbFirst  = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
            uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
            uint8_t const *pbBuf    = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
            {
                VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
                                                            pbBuf,
                                                            cbFirst,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
                     VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
            }

            if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
            {
                VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
                                                            pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
                                                            pbBuf + cbFirst,
                                                            cbSecond,
                                                            PGMACCESSORIGIN_IEM);
                rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
                Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
                     iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
                     VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
            }
            cBufs++;
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
        }

    AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
              ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
               pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
    pVCpu->iem.s.cActiveMappings = 0;
    return rcStrict;
}
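

/*
 * Informal usage sketch (illustration only, not the actual EM code): the
 * ring-3 force-flag processing path is expected to invoke this along the
 * lines of:
 *
 *      if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
 *          rcStrict = IEMR3ProcessForceFlag(pVM, pVCpu, rcStrict);
 *
 * i.e. the pending bounce-buffer writes are committed and the commit status is
 * merged into whatever status ring-3 was about to return.
 */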

#endif /* IN_RING3 */
