VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAll.cpp @ 92685

Last change on this file since 92685 was 92685, checked in by vboxsync, 3 years ago

VMM: Nested VMX: bugref:10092 Started with constructing EPT-violation VM-exit for iemInitDecoderAndPrefetchOpcodes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 653.2 KB
1/* $Id: IEMAll.cpp 92685 2021-12-02 05:59:39Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2011-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/** @page pg_iem IEM - Interpreted Execution Manager
20 *
21 * The interpreted execution manager (IEM) is for executing short guest code
22 * sequences that are causing too many exits / virtualization traps. It will
23 * also be used to interpret single instructions, thus replacing the selective
24 * interpreters in EM and IOM.
25 *
26 * Design goals:
27 * - Relatively small footprint, although we favour speed and correctness
28 * over size.
29 * - Reasonably fast.
30 * - Correctly handle lock prefixed instructions.
31 * - Complete instruction set - eventually.
32 * - Refactorable into a recompiler, maybe.
33 * - Replace EMInterpret*.
34 *
35 * Using the existing disassembler has been considered; however, this is thought
36 * to conflict with speed as the disassembler chews things a bit too much while
37 * leaving us with a somewhat complicated state to interpret afterwards.
38 *
39 *
40 * The current code is very much work in progress. You've been warned!
41 *
42 *
43 * @section sec_iem_fpu_instr FPU Instructions
44 *
45 * On x86 and AMD64 hosts, the FPU instructions are implemented by executing the
46 * same or equivalent instructions on the host FPU. To make life easy, we also
47 * let the FPU prioritize the unmasked exceptions for us. This, however, only
48 * works reliably when CR0.NE is set, i.e. when using \#MF instead of IRQ 13
49 * for FPU exception delivery, because with CR0.NE=0 there is a window where we
50 * can trigger spurious FPU exceptions.
51 *
52 * The guest FPU state is not loaded into the host CPU and kept there till we
53 * leave IEM because the calling conventions have declared an all year open
54 * season on much of the FPU state. For instance an innocent looking call to
55 * memcpy might end up using a whole bunch of XMM or MM registers if the
56 * particular implementation finds it worthwhile.
57 *
58 *
59 * @section sec_iem_logging Logging
60 *
61 * The IEM code uses the \"IEM\" log group for the main logging. The different
62 * logging levels/flags are generally used for the following purposes:
63 * - Level 1 (Log) : Errors, exceptions, interrupts and such major events.
64 * - Flow (LogFlow): Basic enter/exit IEM state info.
65 * - Level 2 (Log2): ?
66 * - Level 3 (Log3): More detailed enter/exit IEM state info.
67 * - Level 4 (Log4): Decoding mnemonics w/ EIP.
68 * - Level 5 (Log5): Decoding details.
69 * - Level 6 (Log6): Enables/disables the lockstep comparison with REM.
70 * - Level 7 (Log7): iret++ execution logging.
71 * - Level 8 (Log8): Memory writes.
72 * - Level 9 (Log9): Memory reads.
73 *
74 */
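/*
 * Illustrative sketch (not part of the build): how the logging levels listed
 * above map onto the LogN macros from <VBox/log.h>.  The messages and the
 * variables used here are made up; only the macro names and the
 * double-parenthesis calling convention are real.
 */
#if 0
    Log(("IEM: raising #GP(0) at %04x:%RX64\n", uCs, uRip));    /* level 1: major events */
    LogFlow(("IEMExecOne: entering\n"));                        /* flow: enter/exit state info */
    Log4(("decode: %04x:%RX64 mov eax, ebx\n", uCs, uRip));     /* level 4: mnemonics w/ EIP */
    Log8(("write: %RGv LB 4: %#x\n", GCPtrDst, u32Value));      /* level 8: memory writes */
#endif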
75
76//#define IEM_LOG_MEMORY_WRITES
77#define IEM_IMPLEMENTS_TASKSWITCH
78
79/* Disabled warning C4505: 'iemRaisePageFaultJmp' : unreferenced local function has been removed */
80#ifdef _MSC_VER
81# pragma warning(disable:4505)
82#endif
83
84
85/*********************************************************************************************************************************
86* Header Files *
87*********************************************************************************************************************************/
88#define LOG_GROUP LOG_GROUP_IEM
89#define VMCPU_INCL_CPUM_GST_CTX
90#include <VBox/vmm/iem.h>
91#include <VBox/vmm/cpum.h>
92#include <VBox/vmm/apic.h>
93#include <VBox/vmm/pdm.h>
94#include <VBox/vmm/pgm.h>
95#include <VBox/vmm/iom.h>
96#include <VBox/vmm/em.h>
97#include <VBox/vmm/hm.h>
98#include <VBox/vmm/nem.h>
99#include <VBox/vmm/gim.h>
100#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
101# include <VBox/vmm/em.h>
102# include <VBox/vmm/hm_svm.h>
103#endif
104#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
105# include <VBox/vmm/hmvmxinline.h>
106#endif
107#include <VBox/vmm/tm.h>
108#include <VBox/vmm/dbgf.h>
109#include <VBox/vmm/dbgftrace.h>
110#include "IEMInternal.h"
111#include <VBox/vmm/vmcc.h>
112#include <VBox/log.h>
113#include <VBox/err.h>
114#include <VBox/param.h>
115#include <VBox/dis.h>
116#include <VBox/disopcode.h>
117#include <iprt/asm-math.h>
118#include <iprt/assert.h>
119#include <iprt/string.h>
120#include <iprt/x86.h>
121
122
123/*********************************************************************************************************************************
124* Structures and Typedefs *
125*********************************************************************************************************************************/
126/** @typedef PFNIEMOP
127 * Pointer to an opcode decoder function.
128 */
129
130/** @def FNIEMOP_DEF
131 * Define an opcode decoder function.
132 *
133 * We're using macros for this so that adding and removing parameters as well as
134 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
135 *
136 * @param a_Name The function name.
137 */
138
139/** @typedef PFNIEMOPRM
140 * Pointer to an opcode decoder function with RM byte.
141 */
142
143/** @def FNIEMOPRM_DEF
144 * Define an opcode decoder function with RM byte.
145 *
146 * We're using macros for this so that adding and removing parameters as well as
147 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
148 *
149 * @param a_Name The function name.
150 */
151
152#if defined(__GNUC__) && defined(RT_ARCH_X86)
153typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
154typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
155# define FNIEMOP_DEF(a_Name) \
156 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
157# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
158 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
159# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
160 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
161
162#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
163typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
164typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
165# define FNIEMOP_DEF(a_Name) \
166 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
167# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
168 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
169# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
170 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
171
172#elif defined(__GNUC__)
173typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
174typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
175# define FNIEMOP_DEF(a_Name) \
176 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
177# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
178 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
179# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
180 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
181
182#else
183typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
184typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
185# define FNIEMOP_DEF(a_Name) \
186 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) RT_NO_THROW_DEF
187# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
188 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
189# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
190 IEM_STATIC VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
191
192#endif
193#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
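/*
 * Illustrative sketch (not part of the build): how the FNIEMOP_DEF /
 * FNIEMOPRM_DEF / FNIEMOP_CALL_1 macros above fit together.  The opcode names
 * and the trivial bodies are hypothetical; a real decoder fetches the ModR/M
 * byte from the instruction stream and emits micro-ops instead of returning
 * straight away.
 */
#if 0
/** Hypothetical worker for the ModR/M form of an example opcode. */
FNIEMOPRM_DEF(iemOp_Example_rm)
{
    RT_NOREF(pVCpu, bRm);
    return VINF_SUCCESS;                /* placeholder body */
}

/** Hypothetical top-level decoder entry; would sit in g_apfnOneByteMap. */
FNIEMOP_DEF(iemOp_Example)
{
    uint8_t const bRm = 0;              /* a real decoder fetches this byte from the opcode stream */
    return FNIEMOP_CALL_1(iemOp_Example_rm, bRm);
}
#endif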
194
195
196/**
197 * Selector descriptor table entry as fetched by iemMemFetchSelDesc.
198 */
199typedef union IEMSELDESC
200{
201 /** The legacy view. */
202 X86DESC Legacy;
203 /** The long mode view. */
204 X86DESC64 Long;
205} IEMSELDESC;
206/** Pointer to a selector descriptor table entry. */
207typedef IEMSELDESC *PIEMSELDESC;
208
209/**
210 * CPU exception classes.
211 */
212typedef enum IEMXCPTCLASS
213{
214 IEMXCPTCLASS_BENIGN,
215 IEMXCPTCLASS_CONTRIBUTORY,
216 IEMXCPTCLASS_PAGE_FAULT,
217 IEMXCPTCLASS_DOUBLE_FAULT
218} IEMXCPTCLASS;
219
220
221/*********************************************************************************************************************************
222* Defined Constants And Macros *
223*********************************************************************************************************************************/
224/** @def IEM_WITH_SETJMP
225 * Enables alternative status code handling using setjmps.
226 *
227 * This adds a bit of expense via the setjmp() call since it saves all the
228 * non-volatile registers. However, it eliminates return code checks and allows
229 * for more optimal return value passing (return regs instead of stack buffer).
230 */
231#if defined(DOXYGEN_RUNNING) || defined(RT_OS_WINDOWS) || 1
232# define IEM_WITH_SETJMP
233#endif
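/*
 * Illustrative sketch (not part of the build): the difference between the
 * status-code style and the setjmp style that IEM_WITH_SETJMP enables.  The
 * helper names are made up; only setjmp/longjmp themselves are standard.
 */
#if 0
    /* Status-code style: every helper returns a VBOXSTRICTRC that must be checked. */
    uint8_t      bByte;
    VBOXSTRICTRC rcStrict = iemExampleFetchByte(pVCpu, &bByte);    /* hypothetical helper */
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    /* setjmp style: the top level establishes a jump buffer once and deeply
       nested helpers longjmp out with a status code, so the hot path needs no
       checks and values can be returned in registers instead of via output
       parameters. */
    jmp_buf JmpBuf;                                                /* <setjmp.h> */
    int rc = setjmp(JmpBuf);
    if (rc == 0)
        bByte = iemExampleFetchByteJmp(pVCpu, &JmpBuf);            /* longjmp(JmpBuf, rc) on failure */
    else
        return rc;                                                 /* the status passed to longjmp */
#endif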
234
235/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
236 * due to GCC lacking knowledge about the value range of a switch. */
237#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
238
239/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
240#define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
241
242/**
243 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
244 * occasion.
245 */
246#ifdef LOG_ENABLED
247# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
248 do { \
249 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
250 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
251 } while (0)
252#else
253# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
254 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
255#endif
256
257/**
258 * Returns VERR_IEM_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
259 * occasion using the supplied logger statement.
260 *
261 * @param a_LoggerArgs What to log on failure.
262 */
263#ifdef LOG_ENABLED
264# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
265 do { \
266 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
267 /*LogFunc(a_LoggerArgs);*/ \
268 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
269 } while (0)
270#else
271# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
272 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
273#endif
274
275/**
276 * Call an opcode decoder function.
277 *
278 * We're using macros for this so that adding and removing parameters can be
279 * done as we please. See FNIEMOP_DEF.
280 */
281#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
282
283/**
284 * Call a common opcode decoder function taking one extra argument.
285 *
286 * We're using macros for this so that adding and removing parameters can be
287 * done as we please. See FNIEMOP_DEF_1.
288 */
289#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
290
291/**
292 * Call a common opcode decoder function taking two extra arguments.
293 *
294 * We're using macros for this so that adding and removing parameters can be
295 * done as we please. See FNIEMOP_DEF_2.
296 */
297#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
298
299/**
300 * Check if we're currently executing in real or virtual 8086 mode.
301 *
302 * @returns @c true if it is, @c false if not.
303 * @param a_pVCpu The IEM state of the current CPU.
304 */
305#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (CPUMIsGuestInRealOrV86ModeEx(IEM_GET_CTX(a_pVCpu)))
306
307/**
308 * Check if we're currently executing in virtual 8086 mode.
309 *
310 * @returns @c true if it is, @c false if not.
311 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
312 */
313#define IEM_IS_V86_MODE(a_pVCpu) (CPUMIsGuestInV86ModeEx(IEM_GET_CTX(a_pVCpu)))
314
315/**
316 * Check if we're currently executing in long mode.
317 *
318 * @returns @c true if it is, @c false if not.
319 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
320 */
321#define IEM_IS_LONG_MODE(a_pVCpu) (CPUMIsGuestInLongModeEx(IEM_GET_CTX(a_pVCpu)))
322
323/**
324 * Check if we're currently executing in a 64-bit code segment.
325 *
326 * @returns @c true if it is, @c false if not.
327 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
328 */
329#define IEM_IS_64BIT_CODE(a_pVCpu) (CPUMIsGuestIn64BitCodeEx(IEM_GET_CTX(a_pVCpu)))
330
331/**
332 * Check if we're currently executing in real mode.
333 *
334 * @returns @c true if it is, @c false if not.
335 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
336 */
337#define IEM_IS_REAL_MODE(a_pVCpu) (CPUMIsGuestInRealModeEx(IEM_GET_CTX(a_pVCpu)))
338
339/**
340 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
341 * @returns PCCPUMFEATURES
342 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
343 */
344#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
345
346/**
347 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
348 * @returns PCCPUMFEATURES
349 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
350 */
351#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.HostFeatures))
352
353/**
354 * Evaluates to true if we're presenting an Intel CPU to the guest.
355 */
356#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_INTEL )
357
358/**
359 * Evaluates to true if we're presenting an AMD CPU to the guest.
360 */
361#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) ( (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_AMD || (a_pVCpu)->iem.s.enmCpuVendor == CPUMCPUVENDOR_HYGON )
362
363/**
364 * Check if the address is canonical.
365 */
366#define IEM_IS_CANONICAL(a_u64Addr) X86_IS_CANONICAL(a_u64Addr)
367
368/**
369 * Gets the effective VEX.VVVV value.
370 *
371 * The 4th bit is ignored when not in 64-bit code.
372 * @returns effective V-register value.
373 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
374 */
375#define IEM_GET_EFFECTIVE_VVVV(a_pVCpu) \
376 ((a_pVCpu)->iem.s.enmCpuMode == IEMMODE_64BIT ? (a_pVCpu)->iem.s.uVex3rdReg : (a_pVCpu)->iem.s.uVex3rdReg & 7)
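/*
 * Worked example for IEM_GET_EFFECTIVE_VVVV: with VEX.VVVV = 0xD (binary 1101),
 * 64-bit code yields register 13, while 16/32-bit code masks off the 4th bit
 * and yields register 5 (0xD & 7).
 */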
377
378/** @def IEM_USE_UNALIGNED_DATA_ACCESS
379 * Use unaligned accesses instead of elaborate byte assembly. */
380#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING)
381# define IEM_USE_UNALIGNED_DATA_ACCESS
382#endif
383
384#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
385
386/**
387 * Check if the guest has entered VMX root operation.
388 */
389# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxRootMode(IEM_GET_CTX(a_pVCpu)))
390
391/**
392 * Check if the guest has entered VMX non-root operation.
393 */
394# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(a_pVCpu)))
395
396/**
397 * Check if the nested-guest has the given Pin-based VM-execution control set.
398 */
399# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_PinCtl) \
400 (CPUMIsGuestVmxPinCtlsSet(IEM_GET_CTX(a_pVCpu), (a_PinCtl)))
401
402/**
403 * Check if the nested-guest has the given Processor-based VM-execution control set.
404 */
405# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_ProcCtl) \
406 (CPUMIsGuestVmxProcCtlsSet(IEM_GET_CTX(a_pVCpu), (a_ProcCtl)))
407
408/**
409 * Check if the nested-guest has the given Secondary Processor-based VM-execution
410 * control set.
411 */
412# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_ProcCtl2) \
413 (CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(a_pVCpu), (a_ProcCtl2)))
414
415/**
416 * Invokes the VMX VM-exit handler for an instruction intercept.
417 */
418# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) \
419 do { return iemVmxVmexitInstr((a_pVCpu), (a_uExitReason), (a_cbInstr)); } while (0)
420
421/**
422 * Invokes the VMX VM-exit handler for an instruction intercept where the
423 * instruction provides additional VM-exit information.
424 */
425# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) \
426 do { return iemVmxVmexitInstrNeedsInfo((a_pVCpu), (a_uExitReason), (a_uInstrId), (a_cbInstr)); } while (0)
427
428/**
429 * Invokes the VMX VM-exit handler for a task switch.
430 */
431# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) \
432 do { return iemVmxVmexitTaskSwitch((a_pVCpu), (a_enmTaskSwitch), (a_SelNewTss), (a_cbInstr)); } while (0)
433
434/**
435 * Invokes the VMX VM-exit handler for MWAIT.
436 */
437# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) \
438 do { return iemVmxVmexitInstrMwait((a_pVCpu), (a_fMonitorArmed), (a_cbInstr)); } while (0)
439
440/**
441 * Invokes the VMX VM-exit handler for EPT faults.
442 */
443# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) \
444 do { return iemVmxVmexitEpt(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr); } while (0)
445
446/**
447 * Invokes the VMX VM-exit handler for a triple fault.
448 */
449# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) \
450 do { return iemVmxVmexit((a_pVCpu), (a_uExitReason), (a_uExitQual)); } while (0)
451
452#else
453# define IEM_VMX_IS_ROOT_MODE(a_pVCpu) (false)
454# define IEM_VMX_IS_NON_ROOT_MODE(a_pVCpu) (false)
455# define IEM_VMX_IS_PINCTLS_SET(a_pVCpu, a_cbInstr) (false)
456# define IEM_VMX_IS_PROCCTLS_SET(a_pVCpu, a_cbInstr) (false)
457# define IEM_VMX_IS_PROCCTLS2_SET(a_pVCpu, a_cbInstr) (false)
458# define IEM_VMX_VMEXIT_INSTR_RET(a_pVCpu, a_uExitReason, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
459# define IEM_VMX_VMEXIT_INSTR_NEEDS_INFO_RET(a_pVCpu, a_uExitReason, a_uInstrId, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
460# define IEM_VMX_VMEXIT_TASK_SWITCH_RET(a_pVCpu, a_enmTaskSwitch, a_SelNewTss, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
461# define IEM_VMX_VMEXIT_MWAIT_RET(a_pVCpu, a_fMonitorArmed, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
462# define IEM_VMX_VMEXIT_EPT_RET(a_pVCpu, a_pPtWalk, a_fAccess, a_fSlatFail, a_cbInstr) do { return VERR_VMX_IPE_1; } while (0)
463# define IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(a_pVCpu, a_uExitReason, a_uExitQual) do { return VERR_VMX_IPE_1; } while (0)
464
465#endif
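/*
 * Illustrative sketch (not part of the build): a typical intercept check in an
 * instruction worker using the macros above.  The control and exit-reason
 * constants are assumptions based on the usual VMX naming scheme, not taken
 * from this file.
 */
#if 0
    if (   IEM_VMX_IS_NON_ROOT_MODE(pVCpu)
        && IEM_VMX_IS_PROCCTLS_SET(pVCpu, VMX_PROC_CTLS_RDTSC_EXIT))
        IEM_VMX_VMEXIT_INSTR_RET(pVCpu, VMX_EXIT_RDTSC, IEM_GET_INSTR_LEN(pVCpu));
#endif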
466
467#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
468/**
469 * Check if an SVM control/instruction intercept is set.
470 */
471# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) \
472 (CPUMIsGuestSvmCtrlInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_Intercept)))
473
474/**
475 * Check if an SVM read CRx intercept is set.
476 */
477# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
478 (CPUMIsGuestSvmReadCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
479
480/**
481 * Check if an SVM write CRx intercept is set.
482 */
483# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) \
484 (CPUMIsGuestSvmWriteCRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uCr)))
485
486/**
487 * Check if an SVM read DRx intercept is set.
488 */
489# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
490 (CPUMIsGuestSvmReadDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
491
492/**
493 * Check if an SVM write DRx intercept is set.
494 */
495# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) \
496 (CPUMIsGuestSvmWriteDRxInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uDr)))
497
498/**
499 * Check if an SVM exception intercept is set.
500 */
501# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) \
502 (CPUMIsGuestSvmXcptInterceptSet(a_pVCpu, IEM_GET_CTX(a_pVCpu), (a_uVector)))
503
504/**
505 * Invokes the SVM \#VMEXIT handler for the nested-guest.
506 */
507# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
508 do { return iemSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2)); } while (0)
509
510/**
511 * Invokes the 'MOV CRx' SVM \#VMEXIT handler after constructing the
512 * corresponding decode assist information.
513 */
514# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) \
515 do \
516 { \
517 uint64_t uExitInfo1; \
518 if ( IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmDecodeAssists \
519 && (a_enmAccessCrX) == IEMACCESSCRX_MOV_CRX) \
520 uExitInfo1 = SVM_EXIT1_MOV_CRX_MASK | ((a_iGReg) & 7); \
521 else \
522 uExitInfo1 = 0; \
523 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, uExitInfo1, 0); \
524 } while (0)
525
526/** Checks and handles the SVM nested-guest instruction intercept and updates the
527 * NRIP if needed.
528 */
529# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \
530 do \
531 { \
532 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept)) \
533 { \
534 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
535 IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2); \
536 } \
537 } while (0)
538
539/** Checks and handles SVM nested-guest CR0 read intercept. */
540# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) \
541 do \
542 { \
543 if (!IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, 0)) \
544 { /* probably likely */ } \
545 else \
546 { \
547 IEM_SVM_UPDATE_NRIP(a_pVCpu); \
548 IEM_SVM_VMEXIT_RET(a_pVCpu, SVM_EXIT_READ_CR0, a_uExitInfo1, a_uExitInfo2); \
549 } \
550 } while (0)
551
552/**
553 * Updates the NextRIP (NRIP) field in the nested-guest VMCB.
554 */
555# define IEM_SVM_UPDATE_NRIP(a_pVCpu) \
556 do { \
557 if (IEM_GET_GUEST_CPU_FEATURES(a_pVCpu)->fSvmNextRipSave) \
558 CPUMGuestSvmUpdateNRip(a_pVCpu, IEM_GET_CTX(a_pVCpu), IEM_GET_INSTR_LEN(a_pVCpu)); \
559 } while (0)
560
561#else
562# define IEM_SVM_IS_CTRL_INTERCEPT_SET(a_pVCpu, a_Intercept) (false)
563# define IEM_SVM_IS_READ_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
564# define IEM_SVM_IS_WRITE_CR_INTERCEPT_SET(a_pVCpu, a_uCr) (false)
565# define IEM_SVM_IS_READ_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
566# define IEM_SVM_IS_WRITE_DR_INTERCEPT_SET(a_pVCpu, a_uDr) (false)
567# define IEM_SVM_IS_XCPT_INTERCEPT_SET(a_pVCpu, a_uVector) (false)
568# define IEM_SVM_VMEXIT_RET(a_pVCpu, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { return VERR_SVM_IPE_1; } while (0)
569# define IEM_SVM_CRX_VMEXIT_RET(a_pVCpu, a_uExitCode, a_enmAccessCrX, a_iGReg) do { return VERR_SVM_IPE_1; } while (0)
570# define IEM_SVM_CHECK_INSTR_INTERCEPT(a_pVCpu, a_Intercept, a_uExitCode, a_uExitInfo1, a_uExitInfo2) do { } while (0)
571# define IEM_SVM_CHECK_READ_CR0_INTERCEPT(a_pVCpu, a_uExitInfo1, a_uExitInfo2) do { } while (0)
572# define IEM_SVM_UPDATE_NRIP(a_pVCpu) do { } while (0)
573
574#endif
575
576
577/*********************************************************************************************************************************
578* Global Variables *
579*********************************************************************************************************************************/
580extern const PFNIEMOP g_apfnOneByteMap[256]; /* not static since we need to forward declare it. */
581
582
583/** Function table for the ADD instruction. */
584IEM_STATIC const IEMOPBINSIZES g_iemAImpl_add =
585{
586 iemAImpl_add_u8, iemAImpl_add_u8_locked,
587 iemAImpl_add_u16, iemAImpl_add_u16_locked,
588 iemAImpl_add_u32, iemAImpl_add_u32_locked,
589 iemAImpl_add_u64, iemAImpl_add_u64_locked
590};
591
592/** Function table for the ADC instruction. */
593IEM_STATIC const IEMOPBINSIZES g_iemAImpl_adc =
594{
595 iemAImpl_adc_u8, iemAImpl_adc_u8_locked,
596 iemAImpl_adc_u16, iemAImpl_adc_u16_locked,
597 iemAImpl_adc_u32, iemAImpl_adc_u32_locked,
598 iemAImpl_adc_u64, iemAImpl_adc_u64_locked
599};
600
601/** Function table for the SUB instruction. */
602IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sub =
603{
604 iemAImpl_sub_u8, iemAImpl_sub_u8_locked,
605 iemAImpl_sub_u16, iemAImpl_sub_u16_locked,
606 iemAImpl_sub_u32, iemAImpl_sub_u32_locked,
607 iemAImpl_sub_u64, iemAImpl_sub_u64_locked
608};
609
610/** Function table for the SBB instruction. */
611IEM_STATIC const IEMOPBINSIZES g_iemAImpl_sbb =
612{
613 iemAImpl_sbb_u8, iemAImpl_sbb_u8_locked,
614 iemAImpl_sbb_u16, iemAImpl_sbb_u16_locked,
615 iemAImpl_sbb_u32, iemAImpl_sbb_u32_locked,
616 iemAImpl_sbb_u64, iemAImpl_sbb_u64_locked
617};
618
619/** Function table for the OR instruction. */
620IEM_STATIC const IEMOPBINSIZES g_iemAImpl_or =
621{
622 iemAImpl_or_u8, iemAImpl_or_u8_locked,
623 iemAImpl_or_u16, iemAImpl_or_u16_locked,
624 iemAImpl_or_u32, iemAImpl_or_u32_locked,
625 iemAImpl_or_u64, iemAImpl_or_u64_locked
626};
627
628/** Function table for the XOR instruction. */
629IEM_STATIC const IEMOPBINSIZES g_iemAImpl_xor =
630{
631 iemAImpl_xor_u8, iemAImpl_xor_u8_locked,
632 iemAImpl_xor_u16, iemAImpl_xor_u16_locked,
633 iemAImpl_xor_u32, iemAImpl_xor_u32_locked,
634 iemAImpl_xor_u64, iemAImpl_xor_u64_locked
635};
636
637/** Function table for the AND instruction. */
638IEM_STATIC const IEMOPBINSIZES g_iemAImpl_and =
639{
640 iemAImpl_and_u8, iemAImpl_and_u8_locked,
641 iemAImpl_and_u16, iemAImpl_and_u16_locked,
642 iemAImpl_and_u32, iemAImpl_and_u32_locked,
643 iemAImpl_and_u64, iemAImpl_and_u64_locked
644};
645
646/** Function table for the CMP instruction.
647 * @remarks Making operand order ASSUMPTIONS.
648 */
649IEM_STATIC const IEMOPBINSIZES g_iemAImpl_cmp =
650{
651 iemAImpl_cmp_u8, NULL,
652 iemAImpl_cmp_u16, NULL,
653 iemAImpl_cmp_u32, NULL,
654 iemAImpl_cmp_u64, NULL
655};
656
657/** Function table for the TEST instruction.
658 * @remarks Making operand order ASSUMPTIONS.
659 */
660IEM_STATIC const IEMOPBINSIZES g_iemAImpl_test =
661{
662 iemAImpl_test_u8, NULL,
663 iemAImpl_test_u16, NULL,
664 iemAImpl_test_u32, NULL,
665 iemAImpl_test_u64, NULL
666};
667
668/** Function table for the BT instruction. */
669IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bt =
670{
671 NULL, NULL,
672 iemAImpl_bt_u16, NULL,
673 iemAImpl_bt_u32, NULL,
674 iemAImpl_bt_u64, NULL
675};
676
677/** Function table for the BTC instruction. */
678IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btc =
679{
680 NULL, NULL,
681 iemAImpl_btc_u16, iemAImpl_btc_u16_locked,
682 iemAImpl_btc_u32, iemAImpl_btc_u32_locked,
683 iemAImpl_btc_u64, iemAImpl_btc_u64_locked
684};
685
686/** Function table for the BTR instruction. */
687IEM_STATIC const IEMOPBINSIZES g_iemAImpl_btr =
688{
689 NULL, NULL,
690 iemAImpl_btr_u16, iemAImpl_btr_u16_locked,
691 iemAImpl_btr_u32, iemAImpl_btr_u32_locked,
692 iemAImpl_btr_u64, iemAImpl_btr_u64_locked
693};
694
695/** Function table for the BTS instruction. */
696IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bts =
697{
698 NULL, NULL,
699 iemAImpl_bts_u16, iemAImpl_bts_u16_locked,
700 iemAImpl_bts_u32, iemAImpl_bts_u32_locked,
701 iemAImpl_bts_u64, iemAImpl_bts_u64_locked
702};
703
704/** Function table for the BSF instruction. */
705IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsf =
706{
707 NULL, NULL,
708 iemAImpl_bsf_u16, NULL,
709 iemAImpl_bsf_u32, NULL,
710 iemAImpl_bsf_u64, NULL
711};
712
713/** Function table for the BSR instruction. */
714IEM_STATIC const IEMOPBINSIZES g_iemAImpl_bsr =
715{
716 NULL, NULL,
717 iemAImpl_bsr_u16, NULL,
718 iemAImpl_bsr_u32, NULL,
719 iemAImpl_bsr_u64, NULL
720};
721
722/** Function table for the IMUL instruction. */
723IEM_STATIC const IEMOPBINSIZES g_iemAImpl_imul_two =
724{
725 NULL, NULL,
726 iemAImpl_imul_two_u16, NULL,
727 iemAImpl_imul_two_u32, NULL,
728 iemAImpl_imul_two_u64, NULL
729};
730
731/** Group 1 /r lookup table. */
732IEM_STATIC const PCIEMOPBINSIZES g_apIemImplGrp1[8] =
733{
734 &g_iemAImpl_add,
735 &g_iemAImpl_or,
736 &g_iemAImpl_adc,
737 &g_iemAImpl_sbb,
738 &g_iemAImpl_and,
739 &g_iemAImpl_sub,
740 &g_iemAImpl_xor,
741 &g_iemAImpl_cmp
742};
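/*
 * Illustrative sketch (not part of the build): the group 1 table above is
 * indexed by the reg field (bits 5:3) of the ModR/M byte, so e.g. bRm = 0xC8
 * selects entry 1 (&g_iemAImpl_or).  The line below is a simplified stand-in
 * for what the 0x80..0x83 opcode decoders do with it.
 */
#if 0
    PCIEMOPBINSIZES const pImpl = g_apIemImplGrp1[(bRm >> 3) & 7];
#endif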
743
744/** Function table for the INC instruction. */
745IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_inc =
746{
747 iemAImpl_inc_u8, iemAImpl_inc_u8_locked,
748 iemAImpl_inc_u16, iemAImpl_inc_u16_locked,
749 iemAImpl_inc_u32, iemAImpl_inc_u32_locked,
750 iemAImpl_inc_u64, iemAImpl_inc_u64_locked
751};
752
753/** Function table for the DEC instruction. */
754IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_dec =
755{
756 iemAImpl_dec_u8, iemAImpl_dec_u8_locked,
757 iemAImpl_dec_u16, iemAImpl_dec_u16_locked,
758 iemAImpl_dec_u32, iemAImpl_dec_u32_locked,
759 iemAImpl_dec_u64, iemAImpl_dec_u64_locked
760};
761
762/** Function table for the NEG instruction. */
763IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_neg =
764{
765 iemAImpl_neg_u8, iemAImpl_neg_u8_locked,
766 iemAImpl_neg_u16, iemAImpl_neg_u16_locked,
767 iemAImpl_neg_u32, iemAImpl_neg_u32_locked,
768 iemAImpl_neg_u64, iemAImpl_neg_u64_locked
769};
770
771/** Function table for the NOT instruction. */
772IEM_STATIC const IEMOPUNARYSIZES g_iemAImpl_not =
773{
774 iemAImpl_not_u8, iemAImpl_not_u8_locked,
775 iemAImpl_not_u16, iemAImpl_not_u16_locked,
776 iemAImpl_not_u32, iemAImpl_not_u32_locked,
777 iemAImpl_not_u64, iemAImpl_not_u64_locked
778};
779
780
781/** Function table for the ROL instruction. */
782IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rol =
783{
784 iemAImpl_rol_u8,
785 iemAImpl_rol_u16,
786 iemAImpl_rol_u32,
787 iemAImpl_rol_u64
788};
789
790/** Function table for the ROR instruction. */
791IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_ror =
792{
793 iemAImpl_ror_u8,
794 iemAImpl_ror_u16,
795 iemAImpl_ror_u32,
796 iemAImpl_ror_u64
797};
798
799/** Function table for the RCL instruction. */
800IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcl =
801{
802 iemAImpl_rcl_u8,
803 iemAImpl_rcl_u16,
804 iemAImpl_rcl_u32,
805 iemAImpl_rcl_u64
806};
807
808/** Function table for the RCR instruction. */
809IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_rcr =
810{
811 iemAImpl_rcr_u8,
812 iemAImpl_rcr_u16,
813 iemAImpl_rcr_u32,
814 iemAImpl_rcr_u64
815};
816
817/** Function table for the SHL instruction. */
818IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shl =
819{
820 iemAImpl_shl_u8,
821 iemAImpl_shl_u16,
822 iemAImpl_shl_u32,
823 iemAImpl_shl_u64
824};
825
826/** Function table for the SHR instruction. */
827IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_shr =
828{
829 iemAImpl_shr_u8,
830 iemAImpl_shr_u16,
831 iemAImpl_shr_u32,
832 iemAImpl_shr_u64
833};
834
835/** Function table for the SAR instruction. */
836IEM_STATIC const IEMOPSHIFTSIZES g_iemAImpl_sar =
837{
838 iemAImpl_sar_u8,
839 iemAImpl_sar_u16,
840 iemAImpl_sar_u32,
841 iemAImpl_sar_u64
842};
843
844
845/** Function table for the MUL instruction. */
846IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_mul =
847{
848 iemAImpl_mul_u8,
849 iemAImpl_mul_u16,
850 iemAImpl_mul_u32,
851 iemAImpl_mul_u64
852};
853
854/** Function table for the IMUL instruction working implicitly on rAX. */
855IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_imul =
856{
857 iemAImpl_imul_u8,
858 iemAImpl_imul_u16,
859 iemAImpl_imul_u32,
860 iemAImpl_imul_u64
861};
862
863/** Function table for the DIV instruction. */
864IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_div =
865{
866 iemAImpl_div_u8,
867 iemAImpl_div_u16,
868 iemAImpl_div_u32,
869 iemAImpl_div_u64
870};
871
872/** Function table for the IDIV instruction. */
873IEM_STATIC const IEMOPMULDIVSIZES g_iemAImpl_idiv =
874{
875 iemAImpl_idiv_u8,
876 iemAImpl_idiv_u16,
877 iemAImpl_idiv_u32,
878 iemAImpl_idiv_u64
879};
880
881/** Function table for the SHLD instruction */
882IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shld =
883{
884 iemAImpl_shld_u16,
885 iemAImpl_shld_u32,
886 iemAImpl_shld_u64,
887};
888
889/** Function table for the SHRD instruction */
890IEM_STATIC const IEMOPSHIFTDBLSIZES g_iemAImpl_shrd =
891{
892 iemAImpl_shrd_u16,
893 iemAImpl_shrd_u32,
894 iemAImpl_shrd_u64,
895};
896
897
898/** Function table for the PUNPCKLBW instruction */
899IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw = { iemAImpl_punpcklbw_u64, iemAImpl_punpcklbw_u128 };
900/** Function table for the PUNPCKLWD instruction */
901IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd = { iemAImpl_punpcklwd_u64, iemAImpl_punpcklwd_u128 };
902/** Function table for the PUNPCKLDQ instruction */
903IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpckldq = { iemAImpl_punpckldq_u64, iemAImpl_punpckldq_u128 };
904/** Function table for the PUNPCKLQDQ instruction */
905IEM_STATIC const IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq = { NULL, iemAImpl_punpcklqdq_u128 };
906
907/** Function table for the PUNPCKHBW instruction */
908IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw = { iemAImpl_punpckhbw_u64, iemAImpl_punpckhbw_u128 };
909/** Function table for the PUNPCKHWD instruction */
910IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd = { iemAImpl_punpckhwd_u64, iemAImpl_punpckhwd_u128 };
911/** Function table for the PUNPCKHDQ instruction */
912IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq = { iemAImpl_punpckhdq_u64, iemAImpl_punpckhdq_u128 };
913/** Function table for the PUNPCKHQDQ instruction */
914IEM_STATIC const IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq = { NULL, iemAImpl_punpckhqdq_u128 };
915
916/** Function table for the PXOR instruction */
917IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pxor = { iemAImpl_pxor_u64, iemAImpl_pxor_u128 };
918/** Function table for the PCMPEQB instruction */
919IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqb = { iemAImpl_pcmpeqb_u64, iemAImpl_pcmpeqb_u128 };
920/** Function table for the PCMPEQW instruction */
921IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqw = { iemAImpl_pcmpeqw_u64, iemAImpl_pcmpeqw_u128 };
922/** Function table for the PCMPEQD instruction */
923IEM_STATIC const IEMOPMEDIAF2 g_iemAImpl_pcmpeqd = { iemAImpl_pcmpeqd_u64, iemAImpl_pcmpeqd_u128 };
924
925
926#if defined(IEM_LOG_MEMORY_WRITES)
927/** What IEM just wrote. */
928uint8_t g_abIemWrote[256];
929/** How much IEM just wrote. */
930size_t g_cbIemWrote;
931#endif
932
933
934/*********************************************************************************************************************************
935* Internal Functions *
936*********************************************************************************************************************************/
937IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr);
938IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu);
939IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu);
940IEM_STATIC VBOXSTRICTRC iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel);
941/*IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresent(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);*/
942IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
943IEM_STATIC VBOXSTRICTRC iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
944IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel);
945IEM_STATIC VBOXSTRICTRC iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr);
946IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr);
947IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu);
948IEM_STATIC VBOXSTRICTRC iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL uSel);
949IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
950IEM_STATIC VBOXSTRICTRC iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel);
951IEM_STATIC VBOXSTRICTRC iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
952IEM_STATIC VBOXSTRICTRC iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
953IEM_STATIC VBOXSTRICTRC iemRaiseAlignmentCheckException(PVMCPUCC pVCpu);
954#ifdef IEM_WITH_SETJMP
955DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc);
956DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu);
957DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
958DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel);
959DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess);
960#endif
961
962IEM_STATIC VBOXSTRICTRC iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess);
963IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess);
964IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
965IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
966IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
967IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
968IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
969IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
970IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem);
971IEM_STATIC VBOXSTRICTRC iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode);
972IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt);
973IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp);
974IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp);
975IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value);
976IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value);
977IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel);
978DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg);
979DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg);
980
981#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
982IEM_STATIC VBOXSTRICTRC iemVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual);
983IEM_STATIC VBOXSTRICTRC iemVmxVmexitTaskSwitch(PVMCPUCC pVCpu, IEMTASKSWITCH enmTaskSwitch, RTSEL SelNewTss, uint8_t cbInstr);
984IEM_STATIC VBOXSTRICTRC iemVmxVmexitEvent(PVMCPUCC pVCpu, uint8_t uVector, uint32_t fFlags, uint32_t uErrCode, uint64_t uCr2, uint8_t cbInstr);
985IEM_STATIC VBOXSTRICTRC iemVmxVmexitEventDoubleFault(PVMCPUCC pVCpu);
986IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMem(PVMCPUCC pVCpu, uint16_t offAccess, size_t cbAccess, void *pvData, uint32_t fAccess);
987IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrRead(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value);
988IEM_STATIC VBOXSTRICTRC iemVmxVirtApicAccessMsrWrite(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t u64Value);
989IEM_STATIC VBOXSTRICTRC iemVmxVmexitEpt(PVMCPUCC pVCpu, PPGMPTWALK pWalk, uint32_t fAccess, uint32_t fSlatFail, uint8_t cbInstr);
990#endif
991
992#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
993IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2);
994IEM_STATIC VBOXSTRICTRC iemHandleSvmEventIntercept(PVMCPUCC pVCpu, uint8_t u8Vector, uint32_t fFlags, uint32_t uErr, uint64_t uCr2);
995#endif
996
997
998/**
999 * Sets the pass up status.
1000 *
1001 * @returns VINF_SUCCESS.
1002 * @param pVCpu The cross context virtual CPU structure of the
1003 * calling thread.
1004 * @param rcPassUp The pass up status. Must be informational.
1005 * VINF_SUCCESS is not allowed.
1006 */
1007IEM_STATIC int iemSetPassUpStatus(PVMCPUCC pVCpu, VBOXSTRICTRC rcPassUp)
1008{
1009 AssertRC(VBOXSTRICTRC_VAL(rcPassUp)); Assert(rcPassUp != VINF_SUCCESS);
1010
1011 int32_t const rcOldPassUp = pVCpu->iem.s.rcPassUp;
1012 if (rcOldPassUp == VINF_SUCCESS)
1013 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1014 /* If both are EM scheduling codes, use EM priority rules. */
1015 else if ( rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST
1016 && rcPassUp >= VINF_EM_FIRST && rcPassUp <= VINF_EM_LAST)
1017 {
1018 if (rcPassUp < rcOldPassUp)
1019 {
1020 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1021 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1022 }
1023 else
1024 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1025 }
1026 /* Override EM scheduling with specific status code. */
1027 else if (rcOldPassUp >= VINF_EM_FIRST && rcOldPassUp <= VINF_EM_LAST)
1028 {
1029 Log(("IEM: rcPassUp=%Rrc! rcOldPassUp=%Rrc\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1030 pVCpu->iem.s.rcPassUp = VBOXSTRICTRC_VAL(rcPassUp);
1031 }
1032 /* Don't override specific status code, first come first served. */
1033 else
1034 Log(("IEM: rcPassUp=%Rrc rcOldPassUp=%Rrc!\n", VBOXSTRICTRC_VAL(rcPassUp), rcOldPassUp));
1035 return VINF_SUCCESS;
1036}
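/*
 * Summary example of the priority rules above: if the stored pass-up status is
 * VINF_SUCCESS, any informational status is taken as-is; if both the old and
 * the new status are EM scheduling codes (VINF_EM_FIRST..VINF_EM_LAST), the
 * numerically smaller (higher priority) one wins; otherwise the first specific
 * (non-EM) status to arrive is kept.
 */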
1037
1038
1039/**
1040 * Calculates the CPU mode.
1041 *
1042 * This is mainly for updating IEMCPU::enmCpuMode.
1043 *
1044 * @returns CPU mode.
1045 * @param pVCpu The cross context virtual CPU structure of the
1046 * calling thread.
1047 */
1048DECLINLINE(IEMMODE) iemCalcCpuMode(PVMCPUCC pVCpu)
1049{
1050 if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
1051 return IEMMODE_64BIT;
1052 if (pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig) /** @todo check if this is correct... */
1053 return IEMMODE_32BIT;
1054 return IEMMODE_16BIT;
1055}
1056
1057
1058/**
1059 * Initializes the execution state.
1060 *
1061 * @param pVCpu The cross context virtual CPU structure of the
1062 * calling thread.
1063 * @param fBypassHandlers Whether to bypass access handlers.
1064 *
1065 * @remarks Callers of this must call iemUninitExec() to undo potentially fatal
1066 * side-effects in strict builds.
1067 */
1068DECLINLINE(void) iemInitExec(PVMCPUCC pVCpu, bool fBypassHandlers)
1069{
1070 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK);
1071 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1072 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1073 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1074 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1075 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1076 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1077 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1078 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1079 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1080
1081 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1082 pVCpu->iem.s.enmCpuMode = iemCalcCpuMode(pVCpu);
1083#ifdef VBOX_STRICT
1084 pVCpu->iem.s.enmDefAddrMode = (IEMMODE)0xfe;
1085 pVCpu->iem.s.enmEffAddrMode = (IEMMODE)0xfe;
1086 pVCpu->iem.s.enmDefOpSize = (IEMMODE)0xfe;
1087 pVCpu->iem.s.enmEffOpSize = (IEMMODE)0xfe;
1088 pVCpu->iem.s.fPrefixes = 0xfeedbeef;
1089 pVCpu->iem.s.uRexReg = 127;
1090 pVCpu->iem.s.uRexB = 127;
1091 pVCpu->iem.s.offModRm = 127;
1092 pVCpu->iem.s.uRexIndex = 127;
1093 pVCpu->iem.s.iEffSeg = 127;
1094 pVCpu->iem.s.idxPrefix = 127;
1095 pVCpu->iem.s.uVex3rdReg = 127;
1096 pVCpu->iem.s.uVexLength = 127;
1097 pVCpu->iem.s.fEvexStuff = 127;
1098 pVCpu->iem.s.uFpuOpcode = UINT16_MAX;
1099# ifdef IEM_WITH_CODE_TLB
1100 pVCpu->iem.s.offInstrNextByte = UINT16_MAX;
1101 pVCpu->iem.s.pbInstrBuf = NULL;
1102 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1103 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1104 pVCpu->iem.s.offCurInstrStart = INT16_MAX;
1105 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1106# else
1107 pVCpu->iem.s.offOpcode = 127;
1108 pVCpu->iem.s.cbOpcode = 127;
1109# endif
1110#endif
1111
1112 pVCpu->iem.s.cActiveMappings = 0;
1113 pVCpu->iem.s.iNextMapping = 0;
1114 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1115 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1116#if 0
1117#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1118 if ( CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx)
1119 && CPUMIsGuestVmxProcCtls2Set(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS2_VIRT_APIC_ACCESS))
1120 {
1121 PCVMXVVMCS pVmcs = pVCpu->cpum.GstCtx.hwvirt.vmx.CTX_SUFF(pVmcs);
1122 Assert(pVmcs);
1123 RTGCPHYS const GCPhysApicAccess = pVmcs->u64AddrApicAccess.u;
1124 if (!PGMHandlerPhysicalIsRegistered(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess))
1125 {
1126 int rc = PGMHandlerPhysicalRegister(pVCpu->CTX_SUFF(pVM), GCPhysApicAccess, GCPhysApicAccess + X86_PAGE_4K_SIZE - 1,
1127 pVCpu->iem.s.hVmxApicAccessPage, NIL_RTR3PTR /* pvUserR3 */,
1128 NIL_RTR0PTR /* pvUserR0 */, NIL_RTRCPTR /* pvUserRC */, NULL /* pszDesc */);
1129 AssertRC(rc);
1130 }
1131 }
1132#endif
1133#endif
1134}
1135
1136#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
1137/**
1138 * Performs a minimal reinitialization of the execution state.
1139 *
1140 * This is intended to be used by VM-exits, SMM, LOADALL and other similar
1141 * 'world-switch' type operations on the CPU. Currently only nested
1142 * hardware-virtualization uses it.
1143 *
1144 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1145 */
1146IEM_STATIC void iemReInitExec(PVMCPUCC pVCpu)
1147{
1148 IEMMODE const enmMode = iemCalcCpuMode(pVCpu);
1149 uint8_t const uCpl = CPUMGetGuestCPL(pVCpu);
1150
1151 pVCpu->iem.s.uCpl = uCpl;
1152 pVCpu->iem.s.enmCpuMode = enmMode;
1153 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1154 pVCpu->iem.s.enmEffAddrMode = enmMode;
1155 if (enmMode != IEMMODE_64BIT)
1156 {
1157 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1158 pVCpu->iem.s.enmEffOpSize = enmMode;
1159 }
1160 else
1161 {
1162 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1163 pVCpu->iem.s.enmEffOpSize = enmMode;
1164 }
1165 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1166#ifndef IEM_WITH_CODE_TLB
1167 /** @todo Shouldn't we be doing this in IEMTlbInvalidateAll()? */
1168 pVCpu->iem.s.offOpcode = 0;
1169 pVCpu->iem.s.cbOpcode = 0;
1170#endif
1171 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1172}
1173#endif
1174
1175/**
1176 * Counterpart to #iemInitExec that undoes evil strict-build stuff.
1177 *
1178 * @param pVCpu The cross context virtual CPU structure of the
1179 * calling thread.
1180 */
1181DECLINLINE(void) iemUninitExec(PVMCPUCC pVCpu)
1182{
1183 /* Note! do not touch fInPatchCode here! (see iemUninitExecAndFiddleStatusAndMaybeReenter) */
1184#ifdef VBOX_STRICT
1185# ifdef IEM_WITH_CODE_TLB
1186 NOREF(pVCpu);
1187# else
1188 pVCpu->iem.s.cbOpcode = 0;
1189# endif
1190#else
1191 NOREF(pVCpu);
1192#endif
1193}
1194
1195
1196/**
1197 * Initializes the decoder state.
1198 *
1199 * iemReInitDecoder is mostly a copy of this function.
1200 *
1201 * @param pVCpu The cross context virtual CPU structure of the
1202 * calling thread.
1203 * @param fBypassHandlers Whether to bypass access handlers.
1204 * @param fDisregardLock Whether to disregard the LOCK prefix.
1205 */
1206DECLINLINE(void) iemInitDecoder(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1207{
1208 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
1209 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1210 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1211 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1212 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1213 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1214 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1215 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1216 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1217 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1218
1219 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu);
1220 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1221 pVCpu->iem.s.enmCpuMode = enmMode;
1222 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1223 pVCpu->iem.s.enmEffAddrMode = enmMode;
1224 if (enmMode != IEMMODE_64BIT)
1225 {
1226 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1227 pVCpu->iem.s.enmEffOpSize = enmMode;
1228 }
1229 else
1230 {
1231 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1232 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1233 }
1234 pVCpu->iem.s.fPrefixes = 0;
1235 pVCpu->iem.s.uRexReg = 0;
1236 pVCpu->iem.s.uRexB = 0;
1237 pVCpu->iem.s.uRexIndex = 0;
1238 pVCpu->iem.s.idxPrefix = 0;
1239 pVCpu->iem.s.uVex3rdReg = 0;
1240 pVCpu->iem.s.uVexLength = 0;
1241 pVCpu->iem.s.fEvexStuff = 0;
1242 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1243#ifdef IEM_WITH_CODE_TLB
1244 pVCpu->iem.s.pbInstrBuf = NULL;
1245 pVCpu->iem.s.offInstrNextByte = 0;
1246 pVCpu->iem.s.offCurInstrStart = 0;
1247# ifdef VBOX_STRICT
1248 pVCpu->iem.s.cbInstrBuf = UINT16_MAX;
1249 pVCpu->iem.s.cbInstrBufTotal = UINT16_MAX;
1250 pVCpu->iem.s.uInstrBufPc = UINT64_C(0xc0ffc0ffcff0c0ff);
1251# endif
1252#else
1253 pVCpu->iem.s.offOpcode = 0;
1254 pVCpu->iem.s.cbOpcode = 0;
1255#endif
1256 pVCpu->iem.s.offModRm = 0;
1257 pVCpu->iem.s.cActiveMappings = 0;
1258 pVCpu->iem.s.iNextMapping = 0;
1259 pVCpu->iem.s.rcPassUp = VINF_SUCCESS;
1260 pVCpu->iem.s.fBypassHandlers = fBypassHandlers;
1261 pVCpu->iem.s.fDisregardLock = fDisregardLock;
1262
1263#ifdef DBGFTRACE_ENABLED
1264 switch (enmMode)
1265 {
1266 case IEMMODE_64BIT:
1267 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1268 break;
1269 case IEMMODE_32BIT:
1270 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1271 break;
1272 case IEMMODE_16BIT:
1273 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1274 break;
1275 }
1276#endif
1277}
1278
1279
1280/**
1281 * Reinitializes the decoder state for the 2nd+ loop of IEMExecLots.
1282 *
1283 * This is mostly a copy of iemInitDecoder.
1284 *
1285 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1286 */
1287DECLINLINE(void) iemReInitDecoder(PVMCPUCC pVCpu)
1288{
1289 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
1290 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
1291 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
1292 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.es));
1293 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ds));
1294 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.fs));
1295 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.gs));
1296 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
1297 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.tr));
1298
1299 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */
1300 IEMMODE enmMode = iemCalcCpuMode(pVCpu);
1301 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */
1302 pVCpu->iem.s.enmDefAddrMode = enmMode; /** @todo check if this is correct... */
1303 pVCpu->iem.s.enmEffAddrMode = enmMode;
1304 if (enmMode != IEMMODE_64BIT)
1305 {
1306 pVCpu->iem.s.enmDefOpSize = enmMode; /** @todo check if this is correct... */
1307 pVCpu->iem.s.enmEffOpSize = enmMode;
1308 }
1309 else
1310 {
1311 pVCpu->iem.s.enmDefOpSize = IEMMODE_32BIT;
1312 pVCpu->iem.s.enmEffOpSize = IEMMODE_32BIT;
1313 }
1314 pVCpu->iem.s.fPrefixes = 0;
1315 pVCpu->iem.s.uRexReg = 0;
1316 pVCpu->iem.s.uRexB = 0;
1317 pVCpu->iem.s.uRexIndex = 0;
1318 pVCpu->iem.s.idxPrefix = 0;
1319 pVCpu->iem.s.uVex3rdReg = 0;
1320 pVCpu->iem.s.uVexLength = 0;
1321 pVCpu->iem.s.fEvexStuff = 0;
1322 pVCpu->iem.s.iEffSeg = X86_SREG_DS;
1323#ifdef IEM_WITH_CODE_TLB
1324 if (pVCpu->iem.s.pbInstrBuf)
1325 {
1326 uint64_t off = (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT ? pVCpu->cpum.GstCtx.rip : pVCpu->cpum.GstCtx.eip + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base)
1327 - pVCpu->iem.s.uInstrBufPc;
1328 if (off < pVCpu->iem.s.cbInstrBufTotal)
1329 {
1330 pVCpu->iem.s.offInstrNextByte = (uint32_t)off;
1331 pVCpu->iem.s.offCurInstrStart = (uint16_t)off;
1332 if ((uint16_t)off + 15 <= pVCpu->iem.s.cbInstrBufTotal)
1333 pVCpu->iem.s.cbInstrBuf = (uint16_t)off + 15;
1334 else
1335 pVCpu->iem.s.cbInstrBuf = pVCpu->iem.s.cbInstrBufTotal;
1336 }
1337 else
1338 {
1339 pVCpu->iem.s.pbInstrBuf = NULL;
1340 pVCpu->iem.s.offInstrNextByte = 0;
1341 pVCpu->iem.s.offCurInstrStart = 0;
1342 pVCpu->iem.s.cbInstrBuf = 0;
1343 pVCpu->iem.s.cbInstrBufTotal = 0;
1344 }
1345 }
1346 else
1347 {
1348 pVCpu->iem.s.offInstrNextByte = 0;
1349 pVCpu->iem.s.offCurInstrStart = 0;
1350 pVCpu->iem.s.cbInstrBuf = 0;
1351 pVCpu->iem.s.cbInstrBufTotal = 0;
1352 }
1353#else
1354 pVCpu->iem.s.cbOpcode = 0;
1355 pVCpu->iem.s.offOpcode = 0;
1356#endif
1357 pVCpu->iem.s.offModRm = 0;
1358 Assert(pVCpu->iem.s.cActiveMappings == 0);
1359 pVCpu->iem.s.iNextMapping = 0;
1360 Assert(pVCpu->iem.s.rcPassUp == VINF_SUCCESS);
1361 Assert(pVCpu->iem.s.fBypassHandlers == false);
1362
1363#ifdef DBGFTRACE_ENABLED
1364 switch (enmMode)
1365 {
1366 case IEMMODE_64BIT:
1367 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I64/%u %08llx", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.rip);
1368 break;
1369 case IEMMODE_32BIT:
1370 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I32/%u %04x:%08x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1371 break;
1372 case IEMMODE_16BIT:
1373 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "I16/%u %04x:%04x", pVCpu->iem.s.uCpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip);
1374 break;
1375 }
1376#endif
1377}
1378
1379
1380
1381/**
1382 * Prefetches opcodes the first time execution is started.
1383 *
1384 * @returns Strict VBox status code.
1385 * @param pVCpu The cross context virtual CPU structure of the
1386 * calling thread.
1387 * @param fBypassHandlers Whether to bypass access handlers.
1388 * @param fDisregardLock Whether to disregard LOCK prefixes.
1389 *
1390 * @todo Combine fDisregardLock and fBypassHandlers into a flag parameter and
1391 * store them as such.
1392 */
1393IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPUCC pVCpu, bool fBypassHandlers, bool fDisregardLock)
1394{
1395 iemInitDecoder(pVCpu, fBypassHandlers, fDisregardLock);
1396
1397#ifdef IEM_WITH_CODE_TLB
1398 /** @todo Do ITLB lookup here. */
1399
1400#else /* !IEM_WITH_CODE_TLB */
1401
1402 /*
1403 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1404 *
1405 * First translate CS:rIP to a physical address.
1406 */
1407 uint32_t cbToTryRead;
1408 RTGCPTR GCPtrPC;
1409 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1410 {
1411 cbToTryRead = PAGE_SIZE;
1412 GCPtrPC = pVCpu->cpum.GstCtx.rip;
1413 if (IEM_IS_CANONICAL(GCPtrPC))
1414 cbToTryRead = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1415 else
1416 return iemRaiseGeneralProtectionFault0(pVCpu);
1417 }
1418 else
1419 {
1420 uint32_t GCPtrPC32 = pVCpu->cpum.GstCtx.eip;
1421 AssertMsg(!(GCPtrPC32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT, ("%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip));
1422 if (GCPtrPC32 <= pVCpu->cpum.GstCtx.cs.u32Limit)
1423 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrPC32 + 1;
1424 else
1425 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1426 if (cbToTryRead) { /* likely */ }
1427 else /* overflowed */
1428 {
1429 Assert(GCPtrPC32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1430 cbToTryRead = UINT32_MAX;
1431 }
1432 GCPtrPC = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrPC32;
1433 Assert(GCPtrPC <= UINT32_MAX);
1434 }
1435
1436 PGMPTWALK Walk;
1437 int rc = PGMGstGetPage(pVCpu, GCPtrPC, &Walk);
1438 if (RT_SUCCESS(rc))
1439 Assert(Walk.fSucceeded); /* probable. */
1440 else
1441 {
1442 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - rc=%Rrc\n", GCPtrPC, rc));
1443#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
1444 if (Walk.fFailed & PGM_WALKFAIL_EPT)
1445 {
1446 Assert(IEM_VMX_IS_NON_ROOT_MODE(pVCpu));
1447 IEM_VMX_VMEXIT_EPT_RET(pVCpu, &Walk, IEM_ACCESS_INSTRUCTION, IEM_SLAT_FAIL_LINEAR_TO_PHYS_ADDR, 0 /* cbInstr */);
1448 }
1449#endif
1450 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, rc);
1451 }
1452 if ((Walk.fEffective & X86_PTE_US) || pVCpu->iem.s.uCpl != 3) { /* likely */ }
1453 else
1454 {
1455 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - supervisor page\n", GCPtrPC));
1456 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1457 }
1458 if (!(Walk.fEffective & X86_PTE_PAE_NX) || !(pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE)) { /* likely */ }
1459 else
1460 {
1461 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv - NX\n", GCPtrPC));
1462 return iemRaisePageFault(pVCpu, GCPtrPC, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1463 }
1464 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrPC & PAGE_OFFSET_MASK);
1465 /** @todo Check reserved bits and such stuff. PGM is better at doing
1466 * that, so do it when implementing the guest virtual address
1467 * TLB... */
1468
1469 /*
1470 * Read the bytes at this address.
1471 */
1472 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrPC & PAGE_OFFSET_MASK);
1473 if (cbToTryRead > cbLeftOnPage)
1474 cbToTryRead = cbLeftOnPage;
1475 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode))
1476 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode);
1477
1478 if (!pVCpu->iem.s.fBypassHandlers)
1479 {
1480 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, pVCpu->iem.s.abOpcode, cbToTryRead, PGMACCESSORIGIN_IEM);
1481 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1482 { /* likely */ }
1483 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1484 {
1485 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1486                 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1487 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1488 }
1489 else
1490 {
1491 Log((RT_SUCCESS(rcStrict)
1492 ? "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1493 : "iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1494                 GCPtrPC, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
1495 return rcStrict;
1496 }
1497 }
1498 else
1499 {
1500 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pVCpu->iem.s.abOpcode, GCPhys, cbToTryRead);
1501 if (RT_SUCCESS(rc))
1502 { /* likely */ }
1503 else
1504 {
1505 Log(("iemInitDecoderAndPrefetchOpcodes: %RGv/%RGp LB %#x - read error - rc=%Rrc (!!)\n",
1506                 GCPtrPC, GCPhys, cbToTryRead, rc));
1507 return rc;
1508 }
1509 }
1510 pVCpu->iem.s.cbOpcode = cbToTryRead;
1511#endif /* !IEM_WITH_CODE_TLB */
1512 return VINF_SUCCESS;
1513}
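
/*
 * Illustrative sketch (not compiled): how the 16/32-bit cbToTryRead arithmetic
 * above behaves in the flat 4GiB wrap case.  With eip=0 and cs.u32Limit=UINT32_MAX
 * the expression u32Limit - eip + 1 wraps to 0, which is why the code substitutes
 * UINT32_MAX before clamping to the page and opcode buffer sizes.  The helper
 * name is made up for this example.
 */
#if 0
static uint32_t iemExampleCalcCbToTryRead(uint32_t uEip, uint32_t uCsLimit)
{
    if (uEip > uCsLimit)
        return 0;                           /* caller raises a selector bounds fault */
    uint32_t cbToTryRead = uCsLimit - uEip + 1;
    if (!cbToTryRead)                       /* overflowed: eip=0, limit=UINT32_MAX */
        cbToTryRead = UINT32_MAX;
    return cbToTryRead;
}
#endif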
1514
1515
1516/**
1517 * Invalidates the IEM TLBs.
1518 *
1519 * This is called internally as well as by PGM when moving GC mappings.
1520 *
1522 * @param pVCpu The cross context virtual CPU structure of the calling
1523 * thread.
1524 * @param fVmm Set when PGM calls us with a remapping.
1525 */
1526VMM_INT_DECL(void) IEMTlbInvalidateAll(PVMCPUCC pVCpu, bool fVmm)
1527{
1528#ifdef IEM_WITH_CODE_TLB
1529 pVCpu->iem.s.cbInstrBufTotal = 0;
1530 pVCpu->iem.s.CodeTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1531 if (pVCpu->iem.s.CodeTlb.uTlbRevision != 0)
1532 { /* very likely */ }
1533 else
1534 {
1535 pVCpu->iem.s.CodeTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1536 unsigned i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1537 while (i-- > 0)
1538 pVCpu->iem.s.CodeTlb.aEntries[i].uTag = 0;
1539 }
1540#endif
1541
1542#ifdef IEM_WITH_DATA_TLB
1543 pVCpu->iem.s.DataTlb.uTlbRevision += IEMTLB_REVISION_INCR;
1544 if (pVCpu->iem.s.DataTlb.uTlbRevision != 0)
1545 { /* very likely */ }
1546 else
1547 {
1548 pVCpu->iem.s.DataTlb.uTlbRevision = IEMTLB_REVISION_INCR;
1549 unsigned i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1550 while (i-- > 0)
1551 pVCpu->iem.s.DataTlb.aEntries[i].uTag = 0;
1552 }
1553#endif
1554 NOREF(pVCpu); NOREF(fVmm);
1555}
1556
1557
1558/**
1559 * Invalidates a page in the TLBs.
1560 *
1561 * @param pVCpu The cross context virtual CPU structure of the calling
1562 * thread.
1563 * @param GCPtr The address of the page to invalidate.
1564 */
1565VMM_INT_DECL(void) IEMTlbInvalidatePage(PVMCPUCC pVCpu, RTGCPTR GCPtr)
1566{
1567#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1568 GCPtr = GCPtr >> X86_PAGE_SHIFT;
1569 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1570 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries) == 256);
1571 uintptr_t idx = (uint8_t)GCPtr;
1572
1573# ifdef IEM_WITH_CODE_TLB
1574 if (pVCpu->iem.s.CodeTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.CodeTlb.uTlbRevision))
1575 {
1576 pVCpu->iem.s.CodeTlb.aEntries[idx].uTag = 0;
1577 if (GCPtr == (pVCpu->iem.s.uInstrBufPc >> X86_PAGE_SHIFT))
1578 pVCpu->iem.s.cbInstrBufTotal = 0;
1579 }
1580# endif
1581
1582# ifdef IEM_WITH_DATA_TLB
1583 if (pVCpu->iem.s.DataTlb.aEntries[idx].uTag == (GCPtr | pVCpu->iem.s.DataTlb.uTlbRevision))
1584 pVCpu->iem.s.DataTlb.aEntries[idx].uTag = 0;
1585# endif
1586#else
1587 NOREF(pVCpu); NOREF(GCPtr);
1588#endif
1589}
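
/*
 * Illustrative sketch (not compiled): the lazy invalidation scheme used by the
 * two functions above.  A TLB entry is only considered valid while its uTag
 * carries the current uTlbRevision, so bumping the revision in
 * IEMTlbInvalidateAll stales every entry without touching the array.  The
 * helper name is made up; field and type names follow the code above.
 */
#if 0
static bool iemExampleTlbEntryIsValid(IEMTLB const *pTlb, RTGCPTR GCPtr)
{
    uint64_t const uTag = (GCPtr >> X86_PAGE_SHIFT) | pTlb->uTlbRevision;
    return pTlb->aEntries[(uint8_t)uTag].uTag == uTag; /* 256 entries, direct mapped */
}
#endif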
1590
1591
1592/**
1593 * Invalidates the host physical aspects of the IEM TLBs.
1594 *
1595 * This is called internally as well as by PGM when moving GC mappings.
1596 *
1597 * @param pVCpu The cross context virtual CPU structure of the calling
1598 * thread.
1599 */
1600VMM_INT_DECL(void) IEMTlbInvalidateAllPhysical(PVMCPUCC pVCpu)
1601{
1602#if defined(IEM_WITH_CODE_TLB) || defined(IEM_WITH_DATA_TLB)
1603    /* Note! This probably won't end up looking exactly like this, but it gives an idea... */
1604
1605# ifdef IEM_WITH_CODE_TLB
1606 pVCpu->iem.s.cbInstrBufTotal = 0;
1607# endif
1608 uint64_t uTlbPhysRev = pVCpu->iem.s.CodeTlb.uTlbPhysRev + IEMTLB_PHYS_REV_INCR;
1609 if (uTlbPhysRev != 0)
1610 {
1611 pVCpu->iem.s.CodeTlb.uTlbPhysRev = uTlbPhysRev;
1612 pVCpu->iem.s.DataTlb.uTlbPhysRev = uTlbPhysRev;
1613 }
1614 else
1615 {
1616 pVCpu->iem.s.CodeTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1617 pVCpu->iem.s.DataTlb.uTlbPhysRev = IEMTLB_PHYS_REV_INCR;
1618
1619 unsigned i;
1620# ifdef IEM_WITH_CODE_TLB
1621 i = RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries);
1622 while (i-- > 0)
1623 {
1624 pVCpu->iem.s.CodeTlb.aEntries[i].pbMappingR3 = NULL;
1625 pVCpu->iem.s.CodeTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1626 }
1627# endif
1628# ifdef IEM_WITH_DATA_TLB
1629 i = RT_ELEMENTS(pVCpu->iem.s.DataTlb.aEntries);
1630 while (i-- > 0)
1631 {
1632 pVCpu->iem.s.DataTlb.aEntries[i].pbMappingR3 = NULL;
1633 pVCpu->iem.s.DataTlb.aEntries[i].fFlagsAndPhysRev &= ~(IEMTLBE_F_PG_NO_WRITE | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PHYS_REV);
1634 }
1635# endif
1636 }
1637#else
1638 NOREF(pVCpu);
1639#endif
1640}
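
/*
 * Illustrative sketch (not compiled): how the physical revision bumped above is
 * consumed.  The physical info cached in an entry is current only while the
 * IEMTLBE_F_PHYS_REV bits of fFlagsAndPhysRev match the TLB's uTlbPhysRev, so a
 * bump forces a PGMPhysIemGCPhys2PtrNoLock refresh on the next use (see
 * iemOpcodeFetchBytesJmp below).  The helper name is made up.
 */
#if 0
static bool iemExampleTlbePhysInfoIsCurrent(IEMTLB const *pTlb, IEMTLBENTRY const *pTlbe)
{
    return (pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pTlb->uTlbPhysRev;
}
#endif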
1641
1642
1643/**
1644 * Invalidates the host physical aspects of the IEM TLBs on all CPUs.
1645 *
1646 * This is called internally as well as by PGM when moving GC mappings.
1647 *
1648 * @param pVM The cross context VM structure.
1649 *
1650 * @remarks Caller holds the PGM lock.
1651 */
1652VMM_INT_DECL(void) IEMTlbInvalidateAllPhysicalAllCpus(PVM pVM)
1653{
1654 RT_NOREF_PV(pVM);
1655}
1656
1657#ifdef IEM_WITH_CODE_TLB
1658
1659/**
1660 * Tries to fetch @a cbDst opcode bytes, raising the appropriate exception and
1661 * longjmp'ing on failure.
1662 *
1663 * We end up here for a number of reasons:
1664 * - pbInstrBuf isn't yet initialized.
1665 * - Advancing beyond the buffer boundary (e.g. crossing a page).
1666 * - Advancing beyond the CS segment limit.
1667 * - Fetching from a non-mappable page (e.g. MMIO).
1668 *
1669 * @param pVCpu The cross context virtual CPU structure of the
1670 * calling thread.
1671 * @param pvDst Where to return the bytes.
1672 * @param cbDst Number of bytes to read.
1673 *
1674 * @todo Make cbDst = 0 a way of initializing pbInstrBuf?
1675 */
1676IEM_STATIC void iemOpcodeFetchBytesJmp(PVMCPUCC pVCpu, size_t cbDst, void *pvDst)
1677{
1678#ifdef IN_RING3
1679 for (;;)
1680 {
1681 Assert(cbDst <= 8);
1682 uint32_t offBuf = pVCpu->iem.s.offInstrNextByte;
1683
1684 /*
1685 * We might have a partial buffer match; deal with that first to make the
1686 * rest simpler. This is the first part of the cross page/buffer case.
1687 */
1688 if (pVCpu->iem.s.pbInstrBuf != NULL)
1689 {
1690 if (offBuf < pVCpu->iem.s.cbInstrBuf)
1691 {
1692 Assert(offBuf + cbDst > pVCpu->iem.s.cbInstrBuf);
1693 uint32_t const cbCopy = pVCpu->iem.s.cbInstrBuf - pVCpu->iem.s.offInstrNextByte;
1694 memcpy(pvDst, &pVCpu->iem.s.pbInstrBuf[offBuf], cbCopy);
1695
1696 cbDst -= cbCopy;
1697 pvDst = (uint8_t *)pvDst + cbCopy;
1698 offBuf += cbCopy;
1699 pVCpu->iem.s.offInstrNextByte += offBuf;
1700 }
1701 }
1702
1703 /*
1704 * Check segment limit, figuring how much we're allowed to access at this point.
1705 *
1706 * We will fault immediately if RIP is past the segment limit / in non-canonical
1707 * territory. If we do continue, there are one or more bytes to read before we
1708 * end up in trouble and we need to do that first before faulting.
1709 */
1710 RTGCPTR GCPtrFirst;
1711 uint32_t cbMaxRead;
1712 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1713 {
1714 GCPtrFirst = pVCpu->cpum.GstCtx.rip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1715 if (RT_LIKELY(IEM_IS_CANONICAL(GCPtrFirst)))
1716 { /* likely */ }
1717 else
1718 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
1719 cbMaxRead = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1720 }
1721 else
1722 {
1723 GCPtrFirst = pVCpu->cpum.GstCtx.eip + (offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart);
1724 Assert(!(GCPtrFirst & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1725 if (RT_LIKELY((uint32_t)GCPtrFirst <= pVCpu->cpum.GstCtx.cs.u32Limit))
1726 { /* likely */ }
1727 else
1728 iemRaiseSelectorBoundsJmp(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1729 cbMaxRead = pVCpu->cpum.GstCtx.cs.u32Limit - (uint32_t)GCPtrFirst + 1;
1730 if (cbMaxRead != 0)
1731 { /* likely */ }
1732 else
1733 {
1734 /* Overflowed because address is 0 and limit is max. */
1735 Assert(GCPtrFirst == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1736 cbMaxRead = X86_PAGE_SIZE;
1737 }
1738 GCPtrFirst = (uint32_t)GCPtrFirst + (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base;
1739 uint32_t cbMaxRead2 = X86_PAGE_SIZE - ((uint32_t)GCPtrFirst & X86_PAGE_OFFSET_MASK);
1740 if (cbMaxRead2 < cbMaxRead)
1741 cbMaxRead = cbMaxRead2;
1742 /** @todo testcase: unreal modes, both huge 16-bit and 32-bit. */
1743 }
1744
1745 /*
1746 * Get the TLB entry for this piece of code.
1747 */
1748 uint64_t uTag = (GCPtrFirst >> X86_PAGE_SHIFT) | pVCpu->iem.s.CodeTlb.uTlbRevision;
1749 AssertCompile(RT_ELEMENTS(pVCpu->iem.s.CodeTlb.aEntries) == 256);
1750 PIEMTLBENTRY pTlbe = &pVCpu->iem.s.CodeTlb.aEntries[(uint8_t)uTag];
1751 if (pTlbe->uTag == uTag)
1752 {
1753 /* likely when executing lots of code, otherwise unlikely */
1754# ifdef VBOX_WITH_STATISTICS
1755 pVCpu->iem.s.CodeTlb.cTlbHits++;
1756# endif
1757 }
1758 else
1759 {
1760 pVCpu->iem.s.CodeTlb.cTlbMisses++;
1761 PGMPTWALK Walk;
1762 int rc = PGMGstGetPage(pVCpu, GCPtrFirst, &Walk);
1763 if (RT_FAILURE(rc))
1764 {
1765 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrFirst, rc));
1766 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, rc);
1767 }
1768
1769 AssertCompile(IEMTLBE_F_PT_NO_EXEC == 1);
1770 Assert(Walk.fSucceeded);
1771 pTlbe->uTag = uTag;
1772 pTlbe->fFlagsAndPhysRev = (~Walk.fEffective & (X86_PTE_US | X86_PTE_RW | X86_PTE_D))
1773 | (Walk.fEffective >> X86_PTE_PAE_BIT_NX);
1774 pTlbe->GCPhys = Walk.GCPhys;
1775 pTlbe->pbMappingR3 = NULL;
1776 }
1777
1778 /*
1779 * Check TLB page table level access flags.
1780 */
1781 if (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PT_NO_USER | IEMTLBE_F_PT_NO_EXEC))
1782 {
1783 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_USER) && pVCpu->iem.s.uCpl == 3)
1784 {
1785 Log(("iemOpcodeFetchBytesJmp: %RGv - supervisor page\n", GCPtrFirst));
1786 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1787 }
1788 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PT_NO_EXEC) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1789 {
1790 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrFirst));
1791 iemRaisePageFaultJmp(pVCpu, GCPtrFirst, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1792 }
1793 }
1794
1795 /*
1796 * Look up the physical page info if necessary.
1797 */
1798 if ((pTlbe->fFlagsAndPhysRev & IEMTLBE_F_PHYS_REV) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1799 { /* not necessary */ }
1800 else
1801 {
1802 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE == IEMTLBE_F_PG_NO_WRITE);
1803 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ == IEMTLBE_F_PG_NO_READ);
1804 AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
1805 pTlbe->fFlagsAndPhysRev &= ~( IEMTLBE_F_PHYS_REV
1806 | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ | IEMTLBE_F_PG_NO_WRITE);
1807 int rc = PGMPhysIemGCPhys2PtrNoLock(pVCpu->CTX_SUFF(pVM), pVCpu, pTlbe->GCPhys, &pVCpu->iem.s.CodeTlb.uTlbPhysRev,
1808 &pTlbe->pbMappingR3, &pTlbe->fFlagsAndPhysRev);
1809 AssertRCStmt(rc, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), rc));
1810 }
1811
1812# if defined(IN_RING3) || defined(IN_RING0) /** @todo fixme */
1813 /*
1814 * Try do a direct read using the pbMappingR3 pointer.
1815 */
1816 if ( (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_NO_MAPPINGR3 | IEMTLBE_F_PG_NO_READ))
1817 == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1818 {
1819 uint32_t const offPg = (GCPtrFirst & X86_PAGE_OFFSET_MASK);
1820 pVCpu->iem.s.cbInstrBufTotal = offPg + cbMaxRead;
1821 if (offBuf == (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart)
1822 {
1823 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(15, cbMaxRead);
1824 pVCpu->iem.s.offCurInstrStart = (int16_t)offPg;
1825 }
1826 else
1827 {
1828 uint32_t const cbInstr = offBuf - (uint32_t)(int32_t)pVCpu->iem.s.offCurInstrStart;
1829 Assert(cbInstr < cbMaxRead);
1830 pVCpu->iem.s.cbInstrBuf = offPg + RT_MIN(cbMaxRead + cbInstr, 15) - cbInstr;
1831 pVCpu->iem.s.offCurInstrStart = (int16_t)(offPg - cbInstr);
1832 }
1833 if (cbDst <= cbMaxRead)
1834 {
1835 pVCpu->iem.s.offInstrNextByte = offPg + (uint32_t)cbDst;
1836 pVCpu->iem.s.uInstrBufPc = GCPtrFirst & ~(RTGCPTR)X86_PAGE_OFFSET_MASK;
1837 pVCpu->iem.s.pbInstrBuf = pTlbe->pbMappingR3;
1838 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbDst);
1839 return;
1840 }
1841 pVCpu->iem.s.pbInstrBuf = NULL;
1842
1843 memcpy(pvDst, &pTlbe->pbMappingR3[offPg], cbMaxRead);
1844 pVCpu->iem.s.offInstrNextByte = offPg + cbMaxRead;
1845 }
1846 else
1847# endif
1848#if 0
1849 /*
1850     * If there is no special read handling, we can read a bit more and
1851 * put it in the prefetch buffer.
1852 */
1853 if ( cbDst < cbMaxRead
1854 && (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | IEMTLBE_F_PG_NO_READ)) == pVCpu->iem.s.CodeTlb.uTlbPhysRev)
1855 {
1856 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys,
1857 &pVCpu->iem.s.abOpcode[0], cbToTryRead, PGMACCESSORIGIN_IEM);
1858 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1859 { /* likely */ }
1860 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1861 {
1862 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1863 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1864 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1865            AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1866 }
1867 else
1868 {
1869 Log((RT_SUCCESS(rcStrict)
1870 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1871 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1872 GCPtrNext, GCPhys, VBOXSTRICTRC_VAL(rcStrict), cbToTryRead));
1873 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1874 }
1875 }
1876 /*
1877 * Special read handling, so only read exactly what's needed.
1878 * This is a highly unlikely scenario.
1879 */
1880 else
1881#endif
1882 {
1883 pVCpu->iem.s.CodeTlb.cTlbSlowReadPath++;
1884 uint32_t const cbToRead = RT_MIN((uint32_t)cbDst, cbMaxRead);
1885 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK),
1886 pvDst, cbToRead, PGMACCESSORIGIN_IEM);
1887 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
1888 { /* likely */ }
1889 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
1890 {
1891 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
1892                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1893 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
1894 AssertStmt(rcStrict == VINF_SUCCESS, longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict)));
1895 }
1896 else
1897 {
1898 Log((RT_SUCCESS(rcStrict)
1899 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
1900 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
1901                 GCPtrFirst, pTlbe->GCPhys + (GCPtrFirst & X86_PAGE_OFFSET_MASK), cbToRead, VBOXSTRICTRC_VAL(rcStrict)));
1902 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
1903 }
1904 pVCpu->iem.s.offInstrNextByte = offBuf + cbToRead;
1905 if (cbToRead == cbDst)
1906 return;
1907 }
1908
1909 /*
1910 * More to read, loop.
1911 */
1912 cbDst -= cbMaxRead;
1913 pvDst = (uint8_t *)pvDst + cbMaxRead;
1914 }
1915#else
1916 RT_NOREF(pvDst, cbDst);
1917 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VERR_INTERNAL_ERROR);
1918#endif
1919}
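
/*
 * Illustrative sketch (not compiled): invariants the instruction buffer window
 * maintained above is expected to satisfy - pbInstrBuf covers at most one guest
 * page, and cbInstrBuf (how far the decoder may read for the current
 * instruction) never exceeds cbInstrBufTotal (the readable bytes).  This is a
 * reading aid only; the helper name is made up.
 */
# if 0
IEM_STATIC void iemExampleAssertInstrBufInvariants(PVMCPUCC pVCpu)
{
    if (pVCpu->iem.s.pbInstrBuf)
    {
        Assert(pVCpu->iem.s.cbInstrBuf      <= pVCpu->iem.s.cbInstrBufTotal);
        Assert(pVCpu->iem.s.cbInstrBufTotal <= X86_PAGE_SIZE);
    }
}
# endif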
1920
1921#else
1922
1923/**
1924 * Tries to fetch at least @a cbMin more opcode bytes, raising the appropriate
1925 * exception if it fails.
1926 *
1927 * @returns Strict VBox status code.
1928 * @param pVCpu The cross context virtual CPU structure of the
1929 * calling thread.
1930 * @param cbMin The minimum number of bytes relative to offOpcode
1931 * that must be read.
1932 */
1933IEM_STATIC VBOXSTRICTRC iemOpcodeFetchMoreBytes(PVMCPUCC pVCpu, size_t cbMin)
1934{
1935 /*
1936 * What we're doing here is very similar to iemMemMap/iemMemBounceBufferMap.
1937 *
1938 * First translate CS:rIP to a physical address.
1939 */
1940 uint8_t cbLeft = pVCpu->iem.s.cbOpcode - pVCpu->iem.s.offOpcode; Assert(cbLeft < cbMin);
1941 uint32_t cbToTryRead;
1942 RTGCPTR GCPtrNext;
1943 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
1944 {
1945 cbToTryRead = PAGE_SIZE;
1946 GCPtrNext = pVCpu->cpum.GstCtx.rip + pVCpu->iem.s.cbOpcode;
1947 if (!IEM_IS_CANONICAL(GCPtrNext))
1948 return iemRaiseGeneralProtectionFault0(pVCpu);
1949 }
1950 else
1951 {
1952 uint32_t GCPtrNext32 = pVCpu->cpum.GstCtx.eip;
1953 Assert(!(GCPtrNext32 & ~(uint32_t)UINT16_MAX) || pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT);
1954 GCPtrNext32 += pVCpu->iem.s.cbOpcode;
1955 if (GCPtrNext32 > pVCpu->cpum.GstCtx.cs.u32Limit)
1956 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1957 cbToTryRead = pVCpu->cpum.GstCtx.cs.u32Limit - GCPtrNext32 + 1;
1958 if (!cbToTryRead) /* overflowed */
1959 {
1960 Assert(GCPtrNext32 == 0); Assert(pVCpu->cpum.GstCtx.cs.u32Limit == UINT32_MAX);
1961 cbToTryRead = UINT32_MAX;
1962 /** @todo check out wrapping around the code segment. */
1963 }
1964 if (cbToTryRead < cbMin - cbLeft)
1965 return iemRaiseSelectorBounds(pVCpu, X86_SREG_CS, IEM_ACCESS_INSTRUCTION);
1966 GCPtrNext = (uint32_t)pVCpu->cpum.GstCtx.cs.u64Base + GCPtrNext32;
1967 }
1968
1969 /* Only read up to the end of the page, and make sure we don't read more
1970 than the opcode buffer can hold. */
1971 uint32_t cbLeftOnPage = PAGE_SIZE - (GCPtrNext & PAGE_OFFSET_MASK);
1972 if (cbToTryRead > cbLeftOnPage)
1973 cbToTryRead = cbLeftOnPage;
1974 if (cbToTryRead > sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode)
1975 cbToTryRead = sizeof(pVCpu->iem.s.abOpcode) - pVCpu->iem.s.cbOpcode;
1976/** @todo r=bird: Convert assertion into undefined opcode exception? */
1977 Assert(cbToTryRead >= cbMin - cbLeft); /* ASSUMPTION based on iemInitDecoderAndPrefetchOpcodes. */
1978
1979 PGMPTWALK Walk;
1980 int rc = PGMGstGetPage(pVCpu, GCPtrNext, &Walk);
1981 if (RT_FAILURE(rc))
1982 {
1983 Log(("iemOpcodeFetchMoreBytes: %RGv - rc=%Rrc\n", GCPtrNext, rc));
1984 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, rc);
1985 }
1986 if (!(Walk.fEffective & X86_PTE_US) && pVCpu->iem.s.uCpl == 3)
1987 {
1988 Log(("iemOpcodeFetchMoreBytes: %RGv - supervisor page\n", GCPtrNext));
1989 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1990 }
1991 if ((Walk.fEffective & X86_PTE_PAE_NX) && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE))
1992 {
1993 Log(("iemOpcodeFetchMoreBytes: %RGv - NX\n", GCPtrNext));
1994 return iemRaisePageFault(pVCpu, GCPtrNext, IEM_ACCESS_INSTRUCTION, VERR_ACCESS_DENIED);
1995 }
1996 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrNext & PAGE_OFFSET_MASK);
1997 Log5(("GCPtrNext=%RGv GCPhys=%RGp cbOpcodes=%#x\n", GCPtrNext, GCPhys, pVCpu->iem.s.cbOpcode));
1998 /** @todo Check reserved bits and such stuff. PGM is better at doing
1999 * that, so do it when implementing the guest virtual address
2000 * TLB... */
2001
2002 /*
2003 * Read the bytes at this address.
2004 *
2005 * We read all unpatched bytes in iemInitDecoderAndPrefetchOpcodes already,
2006 * and since PATM should only patch the start of an instruction there
2007 * should be no need to check again here.
2008 */
2009 if (!pVCpu->iem.s.fBypassHandlers)
2010 {
2011 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhys, &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode],
2012 cbToTryRead, PGMACCESSORIGIN_IEM);
2013 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
2014 { /* likely */ }
2015 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
2016 {
2017 Log(("iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n",
2018                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2019 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
2020 }
2021 else
2022 {
2023 Log((RT_SUCCESS(rcStrict)
2024 ? "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read status - rcStrict=%Rrc\n"
2025 : "iemOpcodeFetchMoreBytes: %RGv/%RGp LB %#x - read error - rcStrict=%Rrc (!!)\n",
2026                 GCPtrNext, GCPhys, cbToTryRead, VBOXSTRICTRC_VAL(rcStrict)));
2027 return rcStrict;
2028 }
2029 }
2030 else
2031 {
2032 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.abOpcode[pVCpu->iem.s.cbOpcode], GCPhys, cbToTryRead);
2033 if (RT_SUCCESS(rc))
2034 { /* likely */ }
2035 else
2036 {
2037 Log(("iemOpcodeFetchMoreBytes: %RGv - read error - rc=%Rrc (!!)\n", GCPtrNext, rc));
2038 return rc;
2039 }
2040 }
2041 pVCpu->iem.s.cbOpcode += cbToTryRead;
2042 Log5(("%.*Rhxs\n", pVCpu->iem.s.cbOpcode, pVCpu->iem.s.abOpcode));
2043
2044 return VINF_SUCCESS;
2045}
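
/*
 * Illustrative sketch (not compiled): the abOpcode buffer invariants the
 * fetcher above maintains - offOpcode is the decoder's read cursor, cbOpcode
 * the number of valid bytes, and both stay within the fixed buffer.  This is a
 * reading aid only; the helper name is made up.
 */
# if 0
IEM_STATIC void iemExampleAssertOpcodeBufInvariants(PVMCPUCC pVCpu)
{
    Assert(pVCpu->iem.s.offOpcode <= pVCpu->iem.s.cbOpcode);
    Assert(pVCpu->iem.s.cbOpcode  <= sizeof(pVCpu->iem.s.abOpcode));
}
# endif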
2046
2047#endif /* !IEM_WITH_CODE_TLB */
2048#ifndef IEM_WITH_SETJMP
2049
2050/**
2051 * Deals with the problematic cases that iemOpcodeGetNextU8 doesn't like.
2052 *
2053 * @returns Strict VBox status code.
2054 * @param pVCpu The cross context virtual CPU structure of the
2055 * calling thread.
2056 * @param pb Where to return the opcode byte.
2057 */
2058DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU8Slow(PVMCPUCC pVCpu, uint8_t *pb)
2059{
2060 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2061 if (rcStrict == VINF_SUCCESS)
2062 {
2063 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2064 *pb = pVCpu->iem.s.abOpcode[offOpcode];
2065 pVCpu->iem.s.offOpcode = offOpcode + 1;
2066 }
2067 else
2068 *pb = 0;
2069 return rcStrict;
2070}
2071
2072
2073/**
2074 * Fetches the next opcode byte.
2075 *
2076 * @returns Strict VBox status code.
2077 * @param pVCpu The cross context virtual CPU structure of the
2078 * calling thread.
2079 * @param pu8 Where to return the opcode byte.
2080 */
2081DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU8(PVMCPUCC pVCpu, uint8_t *pu8)
2082{
2083 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2084 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2085 {
2086 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2087 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2088 return VINF_SUCCESS;
2089 }
2090 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2091}
2092
2093#else /* IEM_WITH_SETJMP */
2094
2095/**
2096 * Deals with the problematic cases that iemOpcodeGetNextU8Jmp doesn't like, longjmp on error.
2097 *
2098 * @returns The opcode byte.
2099 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2100 */
2101DECL_NO_INLINE(IEM_STATIC, uint8_t) iemOpcodeGetNextU8SlowJmp(PVMCPUCC pVCpu)
2102{
2103# ifdef IEM_WITH_CODE_TLB
2104 uint8_t u8;
2105 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u8), &u8);
2106 return u8;
2107# else
2108 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 1);
2109 if (rcStrict == VINF_SUCCESS)
2110 return pVCpu->iem.s.abOpcode[pVCpu->iem.s.offOpcode++];
2111 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2112# endif
2113}
2114
2115
2116/**
2117 * Fetches the next opcode byte, longjmp on error.
2118 *
2119 * @returns The opcode byte.
2120 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2121 */
2122DECLINLINE(uint8_t) iemOpcodeGetNextU8Jmp(PVMCPUCC pVCpu)
2123{
2124# ifdef IEM_WITH_CODE_TLB
2125 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2126 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2127 if (RT_LIKELY( pbBuf != NULL
2128 && offBuf < pVCpu->iem.s.cbInstrBuf))
2129 {
2130 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2131 return pbBuf[offBuf];
2132 }
2133# else
2134 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2135 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2136 {
2137 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2138 return pVCpu->iem.s.abOpcode[offOpcode];
2139 }
2140# endif
2141 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2142}
2143
2144#endif /* IEM_WITH_SETJMP */
2145
2146/**
2147 * Fetches the next opcode byte, returns automatically on failure.
2148 *
2149 * @param a_pu8 Where to return the opcode byte.
2150 * @remark Implicitly references pVCpu.
2151 */
2152#ifndef IEM_WITH_SETJMP
2153# define IEM_OPCODE_GET_NEXT_U8(a_pu8) \
2154 do \
2155 { \
2156 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU8(pVCpu, (a_pu8)); \
2157 if (rcStrict2 == VINF_SUCCESS) \
2158 { /* likely */ } \
2159 else \
2160 return rcStrict2; \
2161 } while (0)
2162#else
2163# define IEM_OPCODE_GET_NEXT_U8(a_pu8) (*(a_pu8) = iemOpcodeGetNextU8Jmp(pVCpu))
2164#endif /* IEM_WITH_SETJMP */
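
/*
 * Illustrative sketch (not compiled): how a decoder routine is expected to use
 * IEM_OPCODE_GET_NEXT_U8.  In the non-setjmp build the macro contains a
 * 'return rcStrict2' on failure, so the caller must itself return a
 * VBOXSTRICTRC; in the setjmp build the fetch simply longjmps.  The function
 * name is made up for this example.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleDecodeImm8(PVMCPUCC pVCpu)
{
    uint8_t bImm;
    IEM_OPCODE_GET_NEXT_U8(&bImm);      /* returns / longjmps on fetch failure */
    Log4(("iemExampleDecodeImm8: imm8=%#x\n", bImm));
    return VINF_SUCCESS;
}
#endif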
2165
2166
2167#ifndef IEM_WITH_SETJMP
2168/**
2169 * Fetches the next signed byte from the opcode stream.
2170 *
2171 * @returns Strict VBox status code.
2172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2173 * @param pi8 Where to return the signed byte.
2174 */
2175DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8(PVMCPUCC pVCpu, int8_t *pi8)
2176{
2177 return iemOpcodeGetNextU8(pVCpu, (uint8_t *)pi8);
2178}
2179#endif /* !IEM_WITH_SETJMP */
2180
2181
2182/**
2183 * Fetches the next signed byte from the opcode stream, returning automatically
2184 * on failure.
2185 *
2186 * @param a_pi8 Where to return the signed byte.
2187 * @remark Implicitly references pVCpu.
2188 */
2189#ifndef IEM_WITH_SETJMP
2190# define IEM_OPCODE_GET_NEXT_S8(a_pi8) \
2191 do \
2192 { \
2193 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8(pVCpu, (a_pi8)); \
2194 if (rcStrict2 != VINF_SUCCESS) \
2195 return rcStrict2; \
2196 } while (0)
2197#else /* IEM_WITH_SETJMP */
2198# define IEM_OPCODE_GET_NEXT_S8(a_pi8) (*(a_pi8) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2199
2200#endif /* IEM_WITH_SETJMP */
2201
2202#ifndef IEM_WITH_SETJMP
2203
2204/**
2205 * Deals with the problematic cases that iemOpcodeGetNextS8SxU16 doesn't like.
2206 *
2207 * @returns Strict VBox status code.
2208 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2209 * @param pu16 Where to return the opcode word.
2210 */
2211DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2212{
2213 uint8_t u8;
2214 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2215 if (rcStrict == VINF_SUCCESS)
2216 *pu16 = (int8_t)u8;
2217 return rcStrict;
2218}
2219
2220
2221/**
2222 * Fetches the next signed byte from the opcode stream, extending it to
2223 * unsigned 16-bit.
2224 *
2225 * @returns Strict VBox status code.
2226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2227 * @param pu16 Where to return the unsigned word.
2228 */
2229DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU16(PVMCPUCC pVCpu, uint16_t *pu16)
2230{
2231 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2232 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2233 return iemOpcodeGetNextS8SxU16Slow(pVCpu, pu16);
2234
2235 *pu16 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2236 pVCpu->iem.s.offOpcode = offOpcode + 1;
2237 return VINF_SUCCESS;
2238}
2239
2240#endif /* !IEM_WITH_SETJMP */
2241
2242/**
2243 * Fetches the next signed byte from the opcode stream, sign-extending it to
2244 * a word, returning automatically on failure.
2245 *
2246 * @param a_pu16 Where to return the word.
2247 * @remark Implicitly references pVCpu.
2248 */
2249#ifndef IEM_WITH_SETJMP
2250# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) \
2251 do \
2252 { \
2253 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU16(pVCpu, (a_pu16)); \
2254 if (rcStrict2 != VINF_SUCCESS) \
2255 return rcStrict2; \
2256 } while (0)
2257#else
2258# define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) (*(a_pu16) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2259#endif
2260
2261#ifndef IEM_WITH_SETJMP
2262
2263/**
2264 * Deals with the problematic cases that iemOpcodeGetNextS8SxU32 doesn't like.
2265 *
2266 * @returns Strict VBox status code.
2267 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2268 * @param pu32 Where to return the opcode dword.
2269 */
2270DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2271{
2272 uint8_t u8;
2273 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2274 if (rcStrict == VINF_SUCCESS)
2275 *pu32 = (int8_t)u8;
2276 return rcStrict;
2277}
2278
2279
2280/**
2281 * Fetches the next signed byte from the opcode stream, extending it to
2282 * unsigned 32-bit.
2283 *
2284 * @returns Strict VBox status code.
2285 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2286 * @param pu32 Where to return the unsigned dword.
2287 */
2288DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2289{
2290 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2291 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2292 return iemOpcodeGetNextS8SxU32Slow(pVCpu, pu32);
2293
2294 *pu32 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2295 pVCpu->iem.s.offOpcode = offOpcode + 1;
2296 return VINF_SUCCESS;
2297}
2298
2299#endif /* !IEM_WITH_SETJMP */
2300
2301/**
2302 * Fetches the next signed byte from the opcode stream, sign-extending it to
2303 * a double word, returning automatically on failure.
2304 *
2305 * @param a_pu32 Where to return the double word.
2306 * @remark Implicitly references pVCpu.
2307 */
2308#ifndef IEM_WITH_SETJMP
2309#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) \
2310 do \
2311 { \
2312 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU32(pVCpu, (a_pu32)); \
2313 if (rcStrict2 != VINF_SUCCESS) \
2314 return rcStrict2; \
2315 } while (0)
2316#else
2317# define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) (*(a_pu32) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2318#endif
2319
2320#ifndef IEM_WITH_SETJMP
2321
2322/**
2323 * Deals with the problematic cases that iemOpcodeGetNextS8SxU64 doesn't like.
2324 *
2325 * @returns Strict VBox status code.
2326 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2327 * @param pu64 Where to return the opcode qword.
2328 */
2329DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS8SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2330{
2331 uint8_t u8;
2332 VBOXSTRICTRC rcStrict = iemOpcodeGetNextU8Slow(pVCpu, &u8);
2333 if (rcStrict == VINF_SUCCESS)
2334 *pu64 = (int8_t)u8;
2335 return rcStrict;
2336}
2337
2338
2339/**
2340 * Fetches the next signed byte from the opcode stream, extending it to
2341 * unsigned 64-bit.
2342 *
2343 * @returns Strict VBox status code.
2344 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2345 * @param pu64 Where to return the unsigned qword.
2346 */
2347DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS8SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2348{
2349 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2350 if (RT_UNLIKELY(offOpcode >= pVCpu->iem.s.cbOpcode))
2351 return iemOpcodeGetNextS8SxU64Slow(pVCpu, pu64);
2352
2353 *pu64 = (int8_t)pVCpu->iem.s.abOpcode[offOpcode];
2354 pVCpu->iem.s.offOpcode = offOpcode + 1;
2355 return VINF_SUCCESS;
2356}
2357
2358#endif /* !IEM_WITH_SETJMP */
2359
2360
2361/**
2362 * Fetches the next signed byte from the opcode stream, sign-extending it to
2363 * a quad word, returning automatically on failure.
2364 *
2365 * @param a_pu64 Where to return the quad word.
2366 * @remark Implicitly references pVCpu.
2367 */
2368#ifndef IEM_WITH_SETJMP
2369# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) \
2370 do \
2371 { \
2372 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS8SxU64(pVCpu, (a_pu64)); \
2373 if (rcStrict2 != VINF_SUCCESS) \
2374 return rcStrict2; \
2375 } while (0)
2376#else
2377# define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) (*(a_pu64) = (int8_t)iemOpcodeGetNextU8Jmp(pVCpu))
2378#endif
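
/*
 * Illustrative sketch (not compiled): the sign extension performed by the
 * IEM_OPCODE_GET_NEXT_S8_SX_U16/U32/U64 fetchers above, spelled out for one
 * value.  The values follow directly from the (int8_t) casts in the code; the
 * helper name is made up.
 */
#if 0
static void iemExampleS8SignExtension(void)
{
    uint8_t  const b   = 0xfe;                 /* -2 as a signed byte */
    uint16_t const u16 = (uint16_t)(int8_t)b;  /* 0xfffe */
    uint32_t const u32 = (uint32_t)(int8_t)b;  /* 0xfffffffe */
    uint64_t const u64 = (uint64_t)(int8_t)b;  /* 0xfffffffffffffffe */
    NOREF(u16); NOREF(u32); NOREF(u64);
}
#endif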
2379
2380
2381#ifndef IEM_WITH_SETJMP
2382/**
2383 * Fetches the next opcode byte, which is a ModR/M byte, recording its offset.
2384 *
2385 * @returns Strict VBox status code.
2386 * @param pVCpu The cross context virtual CPU structure of the
2387 * calling thread.
2388 * @param pu8 Where to return the opcode byte.
2389 */
2390DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextRm(PVMCPUCC pVCpu, uint8_t *pu8)
2391{
2392 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2393 pVCpu->iem.s.offModRm = offOpcode;
2394 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2395 {
2396 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2397 *pu8 = pVCpu->iem.s.abOpcode[offOpcode];
2398 return VINF_SUCCESS;
2399 }
2400 return iemOpcodeGetNextU8Slow(pVCpu, pu8);
2401}
2402#else /* IEM_WITH_SETJMP */
2403/**
2404 * Fetches the next opcode byte, which is a ModR/M byte, recording its offset; longjmp on error.
2405 *
2406 * @returns The opcode byte.
2407 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2408 */
2409DECLINLINE(uint8_t) iemOpcodeGetNextRmJmp(PVMCPUCC pVCpu)
2410{
2411# ifdef IEM_WITH_CODE_TLB
2412 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2413 pVCpu->iem.s.offModRm = offBuf;
2414 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2415 if (RT_LIKELY( pbBuf != NULL
2416 && offBuf < pVCpu->iem.s.cbInstrBuf))
2417 {
2418 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 1;
2419 return pbBuf[offBuf];
2420 }
2421# else
2422 uintptr_t offOpcode = pVCpu->iem.s.offOpcode;
2423 pVCpu->iem.s.offModRm = offOpcode;
2424 if (RT_LIKELY((uint8_t)offOpcode < pVCpu->iem.s.cbOpcode))
2425 {
2426 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 1;
2427 return pVCpu->iem.s.abOpcode[offOpcode];
2428 }
2429# endif
2430 return iemOpcodeGetNextU8SlowJmp(pVCpu);
2431}
2432#endif /* IEM_WITH_SETJMP */
2433
2434/**
2435 * Fetches the next opcode byte, which is a ModR/M byte, returns automatically
2436 * on failure.
2437 *
2438 * Will note down the position of the ModR/M byte for VT-x exits.
2439 *
2440 * @param a_pbRm Where to return the RM opcode byte.
2441 * @remark Implicitly references pVCpu.
2442 */
2443#ifndef IEM_WITH_SETJMP
2444# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) \
2445 do \
2446 { \
2447 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextRm(pVCpu, (a_pbRm)); \
2448 if (rcStrict2 == VINF_SUCCESS) \
2449 { /* likely */ } \
2450 else \
2451 return rcStrict2; \
2452 } while (0)
2453#else
2454# define IEM_OPCODE_GET_NEXT_RM(a_pbRm) (*(a_pbRm) = iemOpcodeGetNextRmJmp(pVCpu))
2455#endif /* IEM_WITH_SETJMP */
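
/*
 * Illustrative sketch (not compiled): typical IEM_OPCODE_GET_NEXT_RM use.  The
 * ModR/M byte is fetched like any other opcode byte, but its offset is noted
 * in offModRm so nested VT-x exit handling can locate it later.  The function
 * name and the field extraction below are for the example only.
 */
#if 0
IEM_STATIC VBOXSTRICTRC iemExampleDecodeModRm(PVMCPUCC pVCpu)
{
    uint8_t bRm;
    IEM_OPCODE_GET_NEXT_RM(&bRm);               /* also records pVCpu->iem.s.offModRm */
    uint8_t const iMod = bRm >> 6;              /* mod field */
    uint8_t const iReg = (bRm >> 3) & 7;        /* reg field */
    uint8_t const iRm  = bRm & 7;               /* r/m field */
    Log4(("iemExampleDecodeModRm: mod=%u reg=%u rm=%u\n", iMod, iReg, iRm));
    return VINF_SUCCESS;
}
#endif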
2456
2457
2458#ifndef IEM_WITH_SETJMP
2459
2460/**
2461 * Deals with the problematic cases that iemOpcodeGetNextU16 doesn't like.
2462 *
2463 * @returns Strict VBox status code.
2464 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2465 * @param pu16 Where to return the opcode word.
2466 */
2467DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16Slow(PVMCPUCC pVCpu, uint16_t *pu16)
2468{
2469 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2470 if (rcStrict == VINF_SUCCESS)
2471 {
2472 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2473# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2474 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2475# else
2476 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2477# endif
2478 pVCpu->iem.s.offOpcode = offOpcode + 2;
2479 }
2480 else
2481 *pu16 = 0;
2482 return rcStrict;
2483}
2484
2485
2486/**
2487 * Fetches the next opcode word.
2488 *
2489 * @returns Strict VBox status code.
2490 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2491 * @param pu16 Where to return the opcode word.
2492 */
2493DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16(PVMCPUCC pVCpu, uint16_t *pu16)
2494{
2495 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2496 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2497 {
2498 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2499# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2500 *pu16 = *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2501# else
2502 *pu16 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2503# endif
2504 return VINF_SUCCESS;
2505 }
2506 return iemOpcodeGetNextU16Slow(pVCpu, pu16);
2507}
2508
2509#else /* IEM_WITH_SETJMP */
2510
2511/**
2512 * Deals with the problematic cases that iemOpcodeGetNextU16Jmp doesn't like, longjmp on error.
2513 *
2514 * @returns The opcode word.
2515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2516 */
2517DECL_NO_INLINE(IEM_STATIC, uint16_t) iemOpcodeGetNextU16SlowJmp(PVMCPUCC pVCpu)
2518{
2519# ifdef IEM_WITH_CODE_TLB
2520 uint16_t u16;
2521 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u16), &u16);
2522 return u16;
2523# else
2524 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2525 if (rcStrict == VINF_SUCCESS)
2526 {
2527 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2528 pVCpu->iem.s.offOpcode += 2;
2529# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2530 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2531# else
2532 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2533# endif
2534 }
2535 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2536# endif
2537}
2538
2539
2540/**
2541 * Fetches the next opcode word, longjmp on error.
2542 *
2543 * @returns The opcode word.
2544 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2545 */
2546DECLINLINE(uint16_t) iemOpcodeGetNextU16Jmp(PVMCPUCC pVCpu)
2547{
2548# ifdef IEM_WITH_CODE_TLB
2549 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2550 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2551 if (RT_LIKELY( pbBuf != NULL
2552 && offBuf + 2 <= pVCpu->iem.s.cbInstrBuf))
2553 {
2554 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 2;
2555# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2556 return *(uint16_t const *)&pbBuf[offBuf];
2557# else
2558 return RT_MAKE_U16(pbBuf[offBuf], pbBuf[offBuf + 1]);
2559# endif
2560 }
2561# else
2562 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2563 if (RT_LIKELY((uint8_t)offOpcode + 2 <= pVCpu->iem.s.cbOpcode))
2564 {
2565 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 2;
2566# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2567 return *(uint16_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2568# else
2569 return RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2570# endif
2571 }
2572# endif
2573 return iemOpcodeGetNextU16SlowJmp(pVCpu);
2574}
2575
2576#endif /* IEM_WITH_SETJMP */
2577
2578
2579/**
2580 * Fetches the next opcode word, returns automatically on failure.
2581 *
2582 * @param a_pu16 Where to return the opcode word.
2583 * @remark Implicitly references pVCpu.
2584 */
2585#ifndef IEM_WITH_SETJMP
2586# define IEM_OPCODE_GET_NEXT_U16(a_pu16) \
2587 do \
2588 { \
2589 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16(pVCpu, (a_pu16)); \
2590 if (rcStrict2 != VINF_SUCCESS) \
2591 return rcStrict2; \
2592 } while (0)
2593#else
2594# define IEM_OPCODE_GET_NEXT_U16(a_pu16) (*(a_pu16) = iemOpcodeGetNextU16Jmp(pVCpu))
2595#endif
2596
2597#ifndef IEM_WITH_SETJMP
2598
2599/**
2600 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU32 doesn't like.
2601 *
2602 * @returns Strict VBox status code.
2603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2604 * @param pu32 Where to return the opcode double word.
2605 */
2606DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2607{
2608 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2609 if (rcStrict == VINF_SUCCESS)
2610 {
2611 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2612 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2613 pVCpu->iem.s.offOpcode = offOpcode + 2;
2614 }
2615 else
2616 *pu32 = 0;
2617 return rcStrict;
2618}
2619
2620
2621/**
2622 * Fetches the next opcode word, zero extending it to a double word.
2623 *
2624 * @returns Strict VBox status code.
2625 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2626 * @param pu32 Where to return the opcode double word.
2627 */
2628DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU32(PVMCPUCC pVCpu, uint32_t *pu32)
2629{
2630 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2631 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2632 return iemOpcodeGetNextU16ZxU32Slow(pVCpu, pu32);
2633
2634 *pu32 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2635 pVCpu->iem.s.offOpcode = offOpcode + 2;
2636 return VINF_SUCCESS;
2637}
2638
2639#endif /* !IEM_WITH_SETJMP */
2640
2641
2642/**
2643 * Fetches the next opcode word and zero extends it to a double word, returns
2644 * automatically on failure.
2645 *
2646 * @param a_pu32 Where to return the opcode double word.
2647 * @remark Implicitly references pVCpu.
2648 */
2649#ifndef IEM_WITH_SETJMP
2650# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) \
2651 do \
2652 { \
2653 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU32(pVCpu, (a_pu32)); \
2654 if (rcStrict2 != VINF_SUCCESS) \
2655 return rcStrict2; \
2656 } while (0)
2657#else
2658# define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU16Jmp(pVCpu))
2659#endif
2660
2661#ifndef IEM_WITH_SETJMP
2662
2663/**
2664 * Deals with the problematic cases that iemOpcodeGetNextU16ZxU64 doesn't like.
2665 *
2666 * @returns Strict VBox status code.
2667 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2668 * @param pu64 Where to return the opcode quad word.
2669 */
2670DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2671{
2672 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 2);
2673 if (rcStrict == VINF_SUCCESS)
2674 {
2675 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2676 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2677 pVCpu->iem.s.offOpcode = offOpcode + 2;
2678 }
2679 else
2680 *pu64 = 0;
2681 return rcStrict;
2682}
2683
2684
2685/**
2686 * Fetches the next opcode word, zero extending it to a quad word.
2687 *
2688 * @returns Strict VBox status code.
2689 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2690 * @param pu64 Where to return the opcode quad word.
2691 */
2692DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU16ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2693{
2694 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2695 if (RT_UNLIKELY(offOpcode + 2 > pVCpu->iem.s.cbOpcode))
2696 return iemOpcodeGetNextU16ZxU64Slow(pVCpu, pu64);
2697
2698 *pu64 = RT_MAKE_U16(pVCpu->iem.s.abOpcode[offOpcode], pVCpu->iem.s.abOpcode[offOpcode + 1]);
2699 pVCpu->iem.s.offOpcode = offOpcode + 2;
2700 return VINF_SUCCESS;
2701}
2702
2703#endif /* !IEM_WITH_SETJMP */
2704
2705/**
2706 * Fetches the next opcode word and zero extends it to a quad word, returns
2707 * automatically on failure.
2708 *
2709 * @param a_pu64 Where to return the opcode quad word.
2710 * @remark Implicitly references pVCpu.
2711 */
2712#ifndef IEM_WITH_SETJMP
2713# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) \
2714 do \
2715 { \
2716 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU16ZxU64(pVCpu, (a_pu64)); \
2717 if (rcStrict2 != VINF_SUCCESS) \
2718 return rcStrict2; \
2719 } while (0)
2720#else
2721# define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU16Jmp(pVCpu))
2722#endif
2723
2724
2725#ifndef IEM_WITH_SETJMP
2726/**
2727 * Fetches the next signed word from the opcode stream.
2728 *
2729 * @returns Strict VBox status code.
2730 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2731 * @param pi16 Where to return the signed word.
2732 */
2733DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS16(PVMCPUCC pVCpu, int16_t *pi16)
2734{
2735 return iemOpcodeGetNextU16(pVCpu, (uint16_t *)pi16);
2736}
2737#endif /* !IEM_WITH_SETJMP */
2738
2739
2740/**
2741 * Fetches the next signed word from the opcode stream, returning automatically
2742 * on failure.
2743 *
2744 * @param a_pi16 Where to return the signed word.
2745 * @remark Implicitly references pVCpu.
2746 */
2747#ifndef IEM_WITH_SETJMP
2748# define IEM_OPCODE_GET_NEXT_S16(a_pi16) \
2749 do \
2750 { \
2751 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS16(pVCpu, (a_pi16)); \
2752 if (rcStrict2 != VINF_SUCCESS) \
2753 return rcStrict2; \
2754 } while (0)
2755#else
2756# define IEM_OPCODE_GET_NEXT_S16(a_pi16) (*(a_pi16) = (int16_t)iemOpcodeGetNextU16Jmp(pVCpu))
2757#endif
2758
2759#ifndef IEM_WITH_SETJMP
2760
2761/**
2762 * Deals with the problematic cases that iemOpcodeGetNextU32 doesn't like.
2763 *
2764 * @returns Strict VBox status code.
2765 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2766 * @param pu32 Where to return the opcode dword.
2767 */
2768DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32Slow(PVMCPUCC pVCpu, uint32_t *pu32)
2769{
2770 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2771 if (rcStrict == VINF_SUCCESS)
2772 {
2773 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2774# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2775 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2776# else
2777 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2778 pVCpu->iem.s.abOpcode[offOpcode + 1],
2779 pVCpu->iem.s.abOpcode[offOpcode + 2],
2780 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2781# endif
2782 pVCpu->iem.s.offOpcode = offOpcode + 4;
2783 }
2784 else
2785 *pu32 = 0;
2786 return rcStrict;
2787}
2788
2789
2790/**
2791 * Fetches the next opcode dword.
2792 *
2793 * @returns Strict VBox status code.
2794 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2795 * @param pu32 Where to return the opcode double word.
2796 */
2797DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32(PVMCPUCC pVCpu, uint32_t *pu32)
2798{
2799 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2800 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2801 {
2802 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2803# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2804 *pu32 = *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2805# else
2806 *pu32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2807 pVCpu->iem.s.abOpcode[offOpcode + 1],
2808 pVCpu->iem.s.abOpcode[offOpcode + 2],
2809 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2810# endif
2811 return VINF_SUCCESS;
2812 }
2813 return iemOpcodeGetNextU32Slow(pVCpu, pu32);
2814}
2815
2816#else /* IEM_WITH_SETJMP */
2817
2818/**
2819 * Deals with the problematic cases that iemOpcodeGetNextU32Jmp doesn't like, longjmp on error.
2820 *
2821 * @returns The opcode dword.
2822 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2823 */
2824DECL_NO_INLINE(IEM_STATIC, uint32_t) iemOpcodeGetNextU32SlowJmp(PVMCPUCC pVCpu)
2825{
2826# ifdef IEM_WITH_CODE_TLB
2827 uint32_t u32;
2828 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u32), &u32);
2829 return u32;
2830# else
2831 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2832 if (rcStrict == VINF_SUCCESS)
2833 {
2834 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2835 pVCpu->iem.s.offOpcode = offOpcode + 4;
2836# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2837 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2838# else
2839 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2840 pVCpu->iem.s.abOpcode[offOpcode + 1],
2841 pVCpu->iem.s.abOpcode[offOpcode + 2],
2842 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2843# endif
2844 }
2845 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
2846# endif
2847}
2848
2849
2850/**
2851 * Fetches the next opcode dword, longjmp on error.
2852 *
2853 * @returns The opcode dword.
2854 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2855 */
2856DECLINLINE(uint32_t) iemOpcodeGetNextU32Jmp(PVMCPUCC pVCpu)
2857{
2858# ifdef IEM_WITH_CODE_TLB
2859 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
2860 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
2861 if (RT_LIKELY( pbBuf != NULL
2862 && offBuf + 4 <= pVCpu->iem.s.cbInstrBuf))
2863 {
2864 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 4;
2865# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2866 return *(uint32_t const *)&pbBuf[offBuf];
2867# else
2868 return RT_MAKE_U32_FROM_U8(pbBuf[offBuf],
2869 pbBuf[offBuf + 1],
2870 pbBuf[offBuf + 2],
2871 pbBuf[offBuf + 3]);
2872# endif
2873 }
2874# else
2875 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
2876 if (RT_LIKELY((uint8_t)offOpcode + 4 <= pVCpu->iem.s.cbOpcode))
2877 {
2878 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 4;
2879# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
2880 return *(uint32_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
2881# else
2882 return RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2883 pVCpu->iem.s.abOpcode[offOpcode + 1],
2884 pVCpu->iem.s.abOpcode[offOpcode + 2],
2885 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2886# endif
2887 }
2888# endif
2889 return iemOpcodeGetNextU32SlowJmp(pVCpu);
2890}
2891
2892#endif /* IEM_WITH_SETJMP */
2893
2894
2895/**
2896 * Fetches the next opcode dword, returns automatically on failure.
2897 *
2898 * @param a_pu32 Where to return the opcode dword.
2899 * @remark Implicitly references pVCpu.
2900 */
2901#ifndef IEM_WITH_SETJMP
2902# define IEM_OPCODE_GET_NEXT_U32(a_pu32) \
2903 do \
2904 { \
2905 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32(pVCpu, (a_pu32)); \
2906 if (rcStrict2 != VINF_SUCCESS) \
2907 return rcStrict2; \
2908 } while (0)
2909#else
2910# define IEM_OPCODE_GET_NEXT_U32(a_pu32) (*(a_pu32) = iemOpcodeGetNextU32Jmp(pVCpu))
2911#endif
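
/*
 * Illustrative sketch (not compiled): the RT_MAKE_U16/U32_FROM_U8 paths above
 * assemble multi-byte immediates little-endian, matching the unaligned read
 * path on little-endian hosts.  E.g. the opcode bytes 78 56 34 12 yield the
 * dword 0x12345678.  The helper name is made up.
 */
#if 0
static uint32_t iemExampleAssembleDwordLE(uint8_t const *pb)
{
    return RT_MAKE_U32_FROM_U8(pb[0], pb[1], pb[2], pb[3]); /* pb[0] is the least significant byte */
}
#endif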
2912
2913#ifndef IEM_WITH_SETJMP
2914
2915/**
2916 * Deals with the problematic cases that iemOpcodeGetNextU32ZxU64 doesn't like.
2917 *
2918 * @returns Strict VBox status code.
2919 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2920 * @param pu64 Where to return the opcode quad word.
2921 */
2922DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
2923{
2924 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
2925 if (rcStrict == VINF_SUCCESS)
2926 {
2927 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
2928 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2929 pVCpu->iem.s.abOpcode[offOpcode + 1],
2930 pVCpu->iem.s.abOpcode[offOpcode + 2],
2931 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2932 pVCpu->iem.s.offOpcode = offOpcode + 4;
2933 }
2934 else
2935 *pu64 = 0;
2936 return rcStrict;
2937}
2938
2939
2940/**
2941 * Fetches the next opcode dword, zero extending it to a quad word.
2942 *
2943 * @returns Strict VBox status code.
2944 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2945 * @param pu64 Where to return the opcode quad word.
2946 */
2947DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU32ZxU64(PVMCPUCC pVCpu, uint64_t *pu64)
2948{
2949 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
2950 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
2951 return iemOpcodeGetNextU32ZxU64Slow(pVCpu, pu64);
2952
2953 *pu64 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
2954 pVCpu->iem.s.abOpcode[offOpcode + 1],
2955 pVCpu->iem.s.abOpcode[offOpcode + 2],
2956 pVCpu->iem.s.abOpcode[offOpcode + 3]);
2957 pVCpu->iem.s.offOpcode = offOpcode + 4;
2958 return VINF_SUCCESS;
2959}
2960
2961#endif /* !IEM_WITH_SETJMP */
2962
2963
2964/**
2965 * Fetches the next opcode dword and zero extends it to a quad word, returns
2966 * automatically on failure.
2967 *
2968 * @param a_pu64 Where to return the opcode quad word.
2969 * @remark Implicitly references pVCpu.
2970 */
2971#ifndef IEM_WITH_SETJMP
2972# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) \
2973 do \
2974 { \
2975 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU32ZxU64(pVCpu, (a_pu64)); \
2976 if (rcStrict2 != VINF_SUCCESS) \
2977 return rcStrict2; \
2978 } while (0)
2979#else
2980# define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) (*(a_pu64) = iemOpcodeGetNextU32Jmp(pVCpu))
2981#endif
2982
2983
2984#ifndef IEM_WITH_SETJMP
2985/**
2986 * Fetches the next signed double word from the opcode stream.
2987 *
2988 * @returns Strict VBox status code.
2989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
2990 * @param pi32 Where to return the signed double word.
2991 */
2992DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32(PVMCPUCC pVCpu, int32_t *pi32)
2993{
2994 return iemOpcodeGetNextU32(pVCpu, (uint32_t *)pi32);
2995}
2996#endif
2997
2998/**
2999 * Fetches the next signed double word from the opcode stream, returning
3000 * automatically on failure.
3001 *
3002 * @param a_pi32 Where to return the signed double word.
3003 * @remark Implicitly references pVCpu.
3004 */
3005#ifndef IEM_WITH_SETJMP
3006# define IEM_OPCODE_GET_NEXT_S32(a_pi32) \
3007 do \
3008 { \
3009 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32(pVCpu, (a_pi32)); \
3010 if (rcStrict2 != VINF_SUCCESS) \
3011 return rcStrict2; \
3012 } while (0)
3013#else
3014# define IEM_OPCODE_GET_NEXT_S32(a_pi32) (*(a_pi32) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3015#endif
3016
3017#ifndef IEM_WITH_SETJMP
3018
3019/**
3020 * Deals with the problematic cases that iemOpcodeGetNextS32SxU64 doesn't like.
3021 *
3022 * @returns Strict VBox status code.
3023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3024 * @param pu64 Where to return the opcode qword.
3025 */
3026DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextS32SxU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3027{
3028 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 4);
3029 if (rcStrict == VINF_SUCCESS)
3030 {
3031 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3032 *pu64 = (int32_t)RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3033 pVCpu->iem.s.abOpcode[offOpcode + 1],
3034 pVCpu->iem.s.abOpcode[offOpcode + 2],
3035 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3036 pVCpu->iem.s.offOpcode = offOpcode + 4;
3037 }
3038 else
3039 *pu64 = 0;
3040 return rcStrict;
3041}
3042
3043
3044/**
3045 * Fetches the next opcode dword, sign extending it into a quad word.
3046 *
3047 * @returns Strict VBox status code.
3048 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3049 * @param pu64 Where to return the opcode quad word.
3050 */
3051DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64)
3052{
3053 uint8_t const offOpcode = pVCpu->iem.s.offOpcode;
3054 if (RT_UNLIKELY(offOpcode + 4 > pVCpu->iem.s.cbOpcode))
3055 return iemOpcodeGetNextS32SxU64Slow(pVCpu, pu64);
3056
3057 int32_t i32 = RT_MAKE_U32_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3058 pVCpu->iem.s.abOpcode[offOpcode + 1],
3059 pVCpu->iem.s.abOpcode[offOpcode + 2],
3060 pVCpu->iem.s.abOpcode[offOpcode + 3]);
3061 *pu64 = i32;
3062 pVCpu->iem.s.offOpcode = offOpcode + 4;
3063 return VINF_SUCCESS;
3064}
3065
3066#endif /* !IEM_WITH_SETJMP */
3067
3068
3069/**
3070 * Fetches the next opcode double word and sign extends it to a quad word,
3071 * returns automatically on failure.
3072 *
3073 * @param a_pu64 Where to return the opcode quad word.
3074 * @remark Implicitly references pVCpu.
3075 */
3076#ifndef IEM_WITH_SETJMP
3077# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) \
3078 do \
3079 { \
3080 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextS32SxU64(pVCpu, (a_pu64)); \
3081 if (rcStrict2 != VINF_SUCCESS) \
3082 return rcStrict2; \
3083 } while (0)
3084#else
3085# define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) (*(a_pu64) = (int32_t)iemOpcodeGetNextU32Jmp(pVCpu))
3086#endif
3087
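/*
 * Illustrative sketch (comment only, not part of the build): how the
 * S32 -> U64 sign extension above behaves for a concrete immediate.  If the
 * next four opcode bytes are FE FF FF FF (little endian), the dword reads as
 * the signed value -2 and the resulting quad word is 0xfffffffffffffffe:
 *
 *     uint64_t u64Disp;                             // hypothetical local
 *     IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64Disp);
 *     Assert(u64Disp == UINT64_C(0xfffffffffffffffe));
 */
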
3088#ifndef IEM_WITH_SETJMP
3089
3090/**
3091 * Deals with the problematic cases that iemOpcodeGetNextU64 doesn't like.
3092 *
3093 * @returns Strict VBox status code.
3094 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3095 * @param pu64 Where to return the opcode qword.
3096 */
3097DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemOpcodeGetNextU64Slow(PVMCPUCC pVCpu, uint64_t *pu64)
3098{
3099 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3100 if (rcStrict == VINF_SUCCESS)
3101 {
3102 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3103# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3104 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3105# else
3106 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3107 pVCpu->iem.s.abOpcode[offOpcode + 1],
3108 pVCpu->iem.s.abOpcode[offOpcode + 2],
3109 pVCpu->iem.s.abOpcode[offOpcode + 3],
3110 pVCpu->iem.s.abOpcode[offOpcode + 4],
3111 pVCpu->iem.s.abOpcode[offOpcode + 5],
3112 pVCpu->iem.s.abOpcode[offOpcode + 6],
3113 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3114# endif
3115 pVCpu->iem.s.offOpcode = offOpcode + 8;
3116 }
3117 else
3118 *pu64 = 0;
3119 return rcStrict;
3120}
3121
3122
3123/**
3124 * Fetches the next opcode qword.
3125 *
3126 * @returns Strict VBox status code.
3127 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3128 * @param pu64 Where to return the opcode qword.
3129 */
3130DECLINLINE(VBOXSTRICTRC) iemOpcodeGetNextU64(PVMCPUCC pVCpu, uint64_t *pu64)
3131{
3132 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3133 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3134 {
3135# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3136 *pu64 = *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3137# else
3138 *pu64 = RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3139 pVCpu->iem.s.abOpcode[offOpcode + 1],
3140 pVCpu->iem.s.abOpcode[offOpcode + 2],
3141 pVCpu->iem.s.abOpcode[offOpcode + 3],
3142 pVCpu->iem.s.abOpcode[offOpcode + 4],
3143 pVCpu->iem.s.abOpcode[offOpcode + 5],
3144 pVCpu->iem.s.abOpcode[offOpcode + 6],
3145 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3146# endif
3147 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3148 return VINF_SUCCESS;
3149 }
3150 return iemOpcodeGetNextU64Slow(pVCpu, pu64);
3151}
3152
3153#else /* IEM_WITH_SETJMP */
3154
3155/**
3156 * Deals with the problematic cases that iemOpcodeGetNextU64Jmp doesn't like, longjmp on error.
3157 *
3158 * @returns The opcode qword.
3159 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3160 */
3161DECL_NO_INLINE(IEM_STATIC, uint64_t) iemOpcodeGetNextU64SlowJmp(PVMCPUCC pVCpu)
3162{
3163# ifdef IEM_WITH_CODE_TLB
3164 uint64_t u64;
3165 iemOpcodeFetchBytesJmp(pVCpu, sizeof(u64), &u64);
3166 return u64;
3167# else
3168 VBOXSTRICTRC rcStrict = iemOpcodeFetchMoreBytes(pVCpu, 8);
3169 if (rcStrict == VINF_SUCCESS)
3170 {
3171 uint8_t offOpcode = pVCpu->iem.s.offOpcode;
3172 pVCpu->iem.s.offOpcode = offOpcode + 8;
3173# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3174 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3175# else
3176 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3177 pVCpu->iem.s.abOpcode[offOpcode + 1],
3178 pVCpu->iem.s.abOpcode[offOpcode + 2],
3179 pVCpu->iem.s.abOpcode[offOpcode + 3],
3180 pVCpu->iem.s.abOpcode[offOpcode + 4],
3181 pVCpu->iem.s.abOpcode[offOpcode + 5],
3182 pVCpu->iem.s.abOpcode[offOpcode + 6],
3183 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3184# endif
3185 }
3186 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
3187# endif
3188}
3189
3190
3191/**
3192 * Fetches the next opcode qword, longjmp on error.
3193 *
3194 * @returns The opcode qword.
3195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3196 */
3197DECLINLINE(uint64_t) iemOpcodeGetNextU64Jmp(PVMCPUCC pVCpu)
3198{
3199# ifdef IEM_WITH_CODE_TLB
3200 uintptr_t offBuf = pVCpu->iem.s.offInstrNextByte;
3201 uint8_t const *pbBuf = pVCpu->iem.s.pbInstrBuf;
3202 if (RT_LIKELY( pbBuf != NULL
3203 && offBuf + 8 <= pVCpu->iem.s.cbInstrBuf))
3204 {
3205 pVCpu->iem.s.offInstrNextByte = (uint32_t)offBuf + 8;
3206# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3207 return *(uint64_t const *)&pbBuf[offBuf];
3208# else
3209 return RT_MAKE_U64_FROM_U8(pbBuf[offBuf],
3210 pbBuf[offBuf + 1],
3211 pbBuf[offBuf + 2],
3212 pbBuf[offBuf + 3],
3213 pbBuf[offBuf + 4],
3214 pbBuf[offBuf + 5],
3215 pbBuf[offBuf + 6],
3216 pbBuf[offBuf + 7]);
3217# endif
3218 }
3219# else
3220 uintptr_t const offOpcode = pVCpu->iem.s.offOpcode;
3221 if (RT_LIKELY((uint8_t)offOpcode + 8 <= pVCpu->iem.s.cbOpcode))
3222 {
3223 pVCpu->iem.s.offOpcode = (uint8_t)offOpcode + 8;
3224# ifdef IEM_USE_UNALIGNED_DATA_ACCESS
3225 return *(uint64_t const *)&pVCpu->iem.s.abOpcode[offOpcode];
3226# else
3227 return RT_MAKE_U64_FROM_U8(pVCpu->iem.s.abOpcode[offOpcode],
3228 pVCpu->iem.s.abOpcode[offOpcode + 1],
3229 pVCpu->iem.s.abOpcode[offOpcode + 2],
3230 pVCpu->iem.s.abOpcode[offOpcode + 3],
3231 pVCpu->iem.s.abOpcode[offOpcode + 4],
3232 pVCpu->iem.s.abOpcode[offOpcode + 5],
3233 pVCpu->iem.s.abOpcode[offOpcode + 6],
3234 pVCpu->iem.s.abOpcode[offOpcode + 7]);
3235# endif
3236 }
3237# endif
3238 return iemOpcodeGetNextU64SlowJmp(pVCpu);
3239}
3240
3241#endif /* IEM_WITH_SETJMP */
3242
3243/**
3244 * Fetches the next opcode quad word, returns automatically on failure.
3245 *
3246 * @param a_pu64 Where to return the opcode quad word.
3247 * @remark Implicitly references pVCpu.
3248 */
3249#ifndef IEM_WITH_SETJMP
3250# define IEM_OPCODE_GET_NEXT_U64(a_pu64) \
3251 do \
3252 { \
3253 VBOXSTRICTRC rcStrict2 = iemOpcodeGetNextU64(pVCpu, (a_pu64)); \
3254 if (rcStrict2 != VINF_SUCCESS) \
3255 return rcStrict2; \
3256 } while (0)
3257#else
3258# define IEM_OPCODE_GET_NEXT_U64(a_pu64) ( *(a_pu64) = iemOpcodeGetNextU64Jmp(pVCpu) )
3259#endif
3260
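/*
 * Illustrative sketch (comment only): the IEM_OPCODE_GET_NEXT_XXX macros are
 * meant to be used inside decoder functions returning VBOXSTRICTRC, because
 * the non-setjmp variants 'return' from the caller on fetch failure.  The
 * function name and the direct register update below are hypothetical and not
 * part of the real opcode tables:
 *
 *     static VBOXSTRICTRC iemOpExample_mov_rax_Iq(PVMCPUCC pVCpu)
 *     {
 *         uint64_t u64Imm;
 *         IEM_OPCODE_GET_NEXT_U64(&u64Imm);   // propagates the fetch status on failure (non-setjmp)
 *         pVCpu->cpum.GstCtx.rax = u64Imm;
 *         return VINF_SUCCESS;
 *     }
 */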
3261
3262/** @name Misc Worker Functions.
3263 * @{
3264 */
3265
3266/**
3267 * Gets the exception class for the specified exception vector.
3268 *
3269 * @returns The class of the specified exception.
3270 * @param uVector The exception vector.
3271 */
3272IEM_STATIC IEMXCPTCLASS iemGetXcptClass(uint8_t uVector)
3273{
3274 Assert(uVector <= X86_XCPT_LAST);
3275 switch (uVector)
3276 {
3277 case X86_XCPT_DE:
3278 case X86_XCPT_TS:
3279 case X86_XCPT_NP:
3280 case X86_XCPT_SS:
3281 case X86_XCPT_GP:
3282 case X86_XCPT_SX: /* AMD only */
3283 return IEMXCPTCLASS_CONTRIBUTORY;
3284
3285 case X86_XCPT_PF:
3286 case X86_XCPT_VE: /* Intel only */
3287 return IEMXCPTCLASS_PAGE_FAULT;
3288
3289 case X86_XCPT_DF:
3290 return IEMXCPTCLASS_DOUBLE_FAULT;
3291 }
3292 return IEMXCPTCLASS_BENIGN;
3293}
3294
3295
3296/**
3297 * Evaluates how to handle an exception caused during delivery of another event
3298 * (exception / interrupt).
3299 *
3300 * @returns How to handle the recursive exception.
3301 * @param pVCpu The cross context virtual CPU structure of the
3302 * calling thread.
3303 * @param fPrevFlags The flags of the previous event.
3304 * @param uPrevVector The vector of the previous event.
3305 * @param fCurFlags The flags of the current exception.
3306 * @param uCurVector The vector of the current exception.
3307 * @param pfXcptRaiseInfo Where to store additional information about the
3308 * exception condition. Optional.
3309 */
3310VMM_INT_DECL(IEMXCPTRAISE) IEMEvaluateRecursiveXcpt(PVMCPUCC pVCpu, uint32_t fPrevFlags, uint8_t uPrevVector, uint32_t fCurFlags,
3311 uint8_t uCurVector, PIEMXCPTRAISEINFO pfXcptRaiseInfo)
3312{
3313 /*
3314     * Only CPU exceptions can be raised while delivering other events; software interrupt
3315 * (INTn/INT3/INTO/ICEBP) generated exceptions cannot occur as the current (second) exception.
3316 */
3317 AssertReturn(fCurFlags & IEM_XCPT_FLAGS_T_CPU_XCPT, IEMXCPTRAISE_INVALID);
3318 Assert(pVCpu); RT_NOREF(pVCpu);
3319 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x\n", uPrevVector, uCurVector));
3320
3321 IEMXCPTRAISE enmRaise = IEMXCPTRAISE_CURRENT_XCPT;
3322 IEMXCPTRAISEINFO fRaiseInfo = IEMXCPTRAISEINFO_NONE;
3323 if (fPrevFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3324 {
3325 IEMXCPTCLASS enmPrevXcptClass = iemGetXcptClass(uPrevVector);
3326 if (enmPrevXcptClass != IEMXCPTCLASS_BENIGN)
3327 {
3328 IEMXCPTCLASS enmCurXcptClass = iemGetXcptClass(uCurVector);
3329 if ( enmPrevXcptClass == IEMXCPTCLASS_PAGE_FAULT
3330 && ( enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT
3331 || enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY))
3332 {
3333 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3334 fRaiseInfo = enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT ? IEMXCPTRAISEINFO_PF_PF
3335 : IEMXCPTRAISEINFO_PF_CONTRIBUTORY_XCPT;
3336 Log2(("IEMEvaluateRecursiveXcpt: Vectoring page fault. uPrevVector=%#x uCurVector=%#x uCr2=%#RX64\n", uPrevVector,
3337 uCurVector, pVCpu->cpum.GstCtx.cr2));
3338 }
3339 else if ( enmPrevXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3340 && enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY)
3341 {
3342 enmRaise = IEMXCPTRAISE_DOUBLE_FAULT;
3343 Log2(("IEMEvaluateRecursiveXcpt: uPrevVector=%#x uCurVector=%#x -> #DF\n", uPrevVector, uCurVector));
3344 }
3345 else if ( enmPrevXcptClass == IEMXCPTCLASS_DOUBLE_FAULT
3346 && ( enmCurXcptClass == IEMXCPTCLASS_CONTRIBUTORY
3347 || enmCurXcptClass == IEMXCPTCLASS_PAGE_FAULT))
3348 {
3349 enmRaise = IEMXCPTRAISE_TRIPLE_FAULT;
3350 Log2(("IEMEvaluateRecursiveXcpt: #DF handler raised a %#x exception -> triple fault\n", uCurVector));
3351 }
3352 }
3353 else
3354 {
3355 if (uPrevVector == X86_XCPT_NMI)
3356 {
3357 fRaiseInfo = IEMXCPTRAISEINFO_NMI_XCPT;
3358 if (uCurVector == X86_XCPT_PF)
3359 {
3360 fRaiseInfo |= IEMXCPTRAISEINFO_NMI_PF;
3361 Log2(("IEMEvaluateRecursiveXcpt: NMI delivery caused a page fault\n"));
3362 }
3363 }
3364 else if ( uPrevVector == X86_XCPT_AC
3365 && uCurVector == X86_XCPT_AC)
3366 {
3367 enmRaise = IEMXCPTRAISE_CPU_HANG;
3368 fRaiseInfo = IEMXCPTRAISEINFO_AC_AC;
3369 Log2(("IEMEvaluateRecursiveXcpt: Recursive #AC - Bad guest\n"));
3370 }
3371 }
3372 }
3373 else if (fPrevFlags & IEM_XCPT_FLAGS_T_EXT_INT)
3374 {
3375 fRaiseInfo = IEMXCPTRAISEINFO_EXT_INT_XCPT;
3376 if (uCurVector == X86_XCPT_PF)
3377 fRaiseInfo |= IEMXCPTRAISEINFO_EXT_INT_PF;
3378 }
3379 else
3380 {
3381 Assert(fPrevFlags & IEM_XCPT_FLAGS_T_SOFT_INT);
3382 fRaiseInfo = IEMXCPTRAISEINFO_SOFT_INT_XCPT;
3383 }
3384
3385 if (pfXcptRaiseInfo)
3386 *pfXcptRaiseInfo = fRaiseInfo;
3387 return enmRaise;
3388}
3389
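/*
 * Illustrative sketch (comment only, hypothetical caller): the classic case
 * handled above is a page fault raised while delivering another page fault,
 * which the benign/contributory/page-fault classification escalates to a
 * double fault:
 *
 *     IEMXCPTRAISEINFO fRaiseInfo;
 *     IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                      IEM_XCPT_FLAGS_T_CPU_XCPT, X86_XCPT_PF,
 *                                                      &fRaiseInfo);
 *     Assert(enmRaise == IEMXCPTRAISE_DOUBLE_FAULT);
 *     Assert(fRaiseInfo & IEMXCPTRAISEINFO_PF_PF);
 */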
3390
3391/**
3392 * Enters the CPU shutdown state initiated by a triple fault or other
3393 * unrecoverable conditions.
3394 *
3395 * @returns Strict VBox status code.
3396 * @param pVCpu The cross context virtual CPU structure of the
3397 * calling thread.
3398 */
3399IEM_STATIC VBOXSTRICTRC iemInitiateCpuShutdown(PVMCPUCC pVCpu)
3400{
3401 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3402 IEM_VMX_VMEXIT_TRIPLE_FAULT_RET(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
3403
3404 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_SHUTDOWN))
3405 {
3406 Log2(("shutdown: Guest intercept -> #VMEXIT\n"));
3407 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_SHUTDOWN, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
3408 }
3409
3410 RT_NOREF(pVCpu);
3411 return VINF_EM_TRIPLE_FAULT;
3412}
3413
3414
3415/**
3416 * Validates a new SS segment.
3417 *
3418 * @returns VBox strict status code.
3419 * @param pVCpu The cross context virtual CPU structure of the
3420 * calling thread.
3421 * @param   NewSS           The new SS selector.
3422 * @param uCpl The CPL to load the stack for.
3423 * @param pDesc Where to return the descriptor.
3424 */
3425IEM_STATIC VBOXSTRICTRC iemMiscValidateNewSS(PVMCPUCC pVCpu, RTSEL NewSS, uint8_t uCpl, PIEMSELDESC pDesc)
3426{
3427 /* Null selectors are not allowed (we're not called for dispatching
3428 interrupts with SS=0 in long mode). */
3429 if (!(NewSS & X86_SEL_MASK_OFF_RPL))
3430 {
3431 Log(("iemMiscValidateNewSSandRsp: %#x - null selector -> #TS(0)\n", NewSS));
3432 return iemRaiseTaskSwitchFault0(pVCpu);
3433 }
3434
3435 /** @todo testcase: check that the TSS.ssX RPL is checked. Also check when. */
3436 if ((NewSS & X86_SEL_RPL) != uCpl)
3437 {
3438 Log(("iemMiscValidateNewSSandRsp: %#x - RPL and CPL (%d) differs -> #TS\n", NewSS, uCpl));
3439 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3440 }
3441
3442 /*
3443 * Read the descriptor.
3444 */
3445 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, pDesc, NewSS, X86_XCPT_TS);
3446 if (rcStrict != VINF_SUCCESS)
3447 return rcStrict;
3448
3449 /*
3450 * Perform the descriptor validation documented for LSS, POP SS and MOV SS.
3451 */
3452 if (!pDesc->Legacy.Gen.u1DescType)
3453 {
3454 Log(("iemMiscValidateNewSSandRsp: %#x - system selector (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3455 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3456 }
3457
3458 if ( (pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
3459 || !(pDesc->Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE) )
3460 {
3461 Log(("iemMiscValidateNewSSandRsp: %#x - code or read only (%#x) -> #TS\n", NewSS, pDesc->Legacy.Gen.u4Type));
3462 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3463 }
3464 if (pDesc->Legacy.Gen.u2Dpl != uCpl)
3465 {
3466 Log(("iemMiscValidateNewSSandRsp: %#x - DPL (%d) and CPL (%d) differs -> #TS\n", NewSS, pDesc->Legacy.Gen.u2Dpl, uCpl));
3467 return iemRaiseTaskSwitchFaultBySelector(pVCpu, NewSS);
3468 }
3469
3470 /* Is it there? */
3471 /** @todo testcase: Is this checked before the canonical / limit check below? */
3472 if (!pDesc->Legacy.Gen.u1Present)
3473 {
3474 Log(("iemMiscValidateNewSSandRsp: %#x - segment not present -> #NP\n", NewSS));
3475 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewSS);
3476 }
3477
3478 return VINF_SUCCESS;
3479}
3480
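/*
 * Illustrative sketch (comment only, hypothetical caller): the helper above is
 * meant to be called before committing anything to SS, with uNewSS standing in
 * for whatever selector the guest supplied:
 *
 *     IEMSELDESC   DescSS;
 *     VBOXSTRICTRC rcStrict = iemMiscValidateNewSS(pVCpu, uNewSS, pVCpu->iem.s.uCpl, &DescSS);
 *     if (rcStrict != VINF_SUCCESS)
 *         return rcStrict;     // propagate the #TS/#NP status produced by the helper
 *     // ... commit SS from DescSS ...
 */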
3481
3482/**
3483 * Gets the correct EFLAGS regardless of whether PATM stores parts of them or
3484 * not (kind of obsolete now).
3485 *
3486 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3487 */
3488#define IEMMISC_GET_EFL(a_pVCpu) ( (a_pVCpu)->cpum.GstCtx.eflags.u )
3489
3490/**
3491 * Updates the EFLAGS in the correct manner wrt. PATM (kind of obsolete).
3492 *
3493 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3494 * @param a_fEfl The new EFLAGS.
3495 */
3496#define IEMMISC_SET_EFL(a_pVCpu, a_fEfl) do { (a_pVCpu)->cpum.GstCtx.eflags.u = (a_fEfl); } while (0)
3497
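/*
 * Illustrative sketch (comment only): the two wrappers above are used for
 * plain read-modify-write sequences on the guest EFLAGS, e.g. clearing IF and
 * TF during event delivery:
 *
 *     uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
 *     fEfl &= ~(X86_EFL_IF | X86_EFL_TF);
 *     IEMMISC_SET_EFL(pVCpu, fEfl);
 */
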
3498/** @} */
3499
3500
3501/** @name Raising Exceptions.
3502 *
3503 * @{
3504 */
3505
3506
3507/**
3508 * Loads the specified stack far pointer from the TSS.
3509 *
3510 * @returns VBox strict status code.
3511 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3512 * @param uCpl The CPL to load the stack for.
3513 * @param pSelSS Where to return the new stack segment.
3514 * @param puEsp Where to return the new stack pointer.
3515 */
3516IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss32Or16(PVMCPUCC pVCpu, uint8_t uCpl, PRTSEL pSelSS, uint32_t *puEsp)
3517{
3518 VBOXSTRICTRC rcStrict;
3519 Assert(uCpl < 4);
3520
3521 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3522 switch (pVCpu->cpum.GstCtx.tr.Attr.n.u4Type)
3523 {
3524 /*
3525 * 16-bit TSS (X86TSS16).
3526 */
3527 case X86_SEL_TYPE_SYS_286_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3528 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
3529 {
3530 uint32_t off = uCpl * 4 + 2;
3531 if (off + 4 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3532 {
3533 /** @todo check actual access pattern here. */
3534 uint32_t u32Tmp = 0; /* gcc maybe... */
3535 rcStrict = iemMemFetchSysU32(pVCpu, &u32Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3536 if (rcStrict == VINF_SUCCESS)
3537 {
3538 *puEsp = RT_LOWORD(u32Tmp);
3539 *pSelSS = RT_HIWORD(u32Tmp);
3540 return VINF_SUCCESS;
3541 }
3542 }
3543 else
3544 {
3545 Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS16\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3546 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3547 }
3548 break;
3549 }
3550
3551 /*
3552 * 32-bit TSS (X86TSS32).
3553 */
3554 case X86_SEL_TYPE_SYS_386_TSS_AVAIL: AssertFailed(); RT_FALL_THRU();
3555 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
3556 {
3557 uint32_t off = uCpl * 8 + 4;
3558 if (off + 7 <= pVCpu->cpum.GstCtx.tr.u32Limit)
3559 {
3560/** @todo check actual access pattern here. */
3561 uint64_t u64Tmp;
3562 rcStrict = iemMemFetchSysU64(pVCpu, &u64Tmp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3563 if (rcStrict == VINF_SUCCESS)
3564 {
3565 *puEsp = u64Tmp & UINT32_MAX;
3566 *pSelSS = (RTSEL)(u64Tmp >> 32);
3567 return VINF_SUCCESS;
3568 }
3569 }
3570 else
3571 {
3572                Log(("LoadStackFromTss32Or16: out of bounds! uCpl=%d, u32Limit=%#x TSS32\n", uCpl, pVCpu->cpum.GstCtx.tr.u32Limit));
3573 rcStrict = iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3574 }
3575 break;
3576 }
3577
3578 default:
3579 AssertFailed();
3580 rcStrict = VERR_IEM_IPE_4;
3581 break;
3582 }
3583
3584 *puEsp = 0; /* make gcc happy */
3585 *pSelSS = 0; /* make gcc happy */
3586 return rcStrict;
3587}
3588
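/*
 * Worked example for the offsets used above (comment only): a 32-bit TSS
 * stores the ring stacks as { esp0, ss0, esp1, ss1, esp2, ss2 } from offset 4,
 * so uCpl * 8 + 4 gives 0x04 for ring 0, 0x0c for ring 1 and 0x14 for ring 2.
 * A 16-bit TSS packs { sp0, ss0, ... } as words from offset 2, hence
 * uCpl * 4 + 2.
 */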
3589
3590/**
3591 * Loads the specified stack pointer from the 64-bit TSS.
3592 *
3593 * @returns VBox strict status code.
3594 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3595 * @param uCpl The CPL to load the stack for.
3596 * @param uIst The interrupt stack table index, 0 if to use uCpl.
3597 * @param puRsp Where to return the new stack pointer.
3598 */
3599IEM_STATIC VBOXSTRICTRC iemRaiseLoadStackFromTss64(PVMCPUCC pVCpu, uint8_t uCpl, uint8_t uIst, uint64_t *puRsp)
3600{
3601 Assert(uCpl < 4);
3602 Assert(uIst < 8);
3603 *puRsp = 0; /* make gcc happy */
3604
3605 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_TR | CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
3606 AssertReturn(pVCpu->cpum.GstCtx.tr.Attr.n.u4Type == AMD64_SEL_TYPE_SYS_TSS_BUSY, VERR_IEM_IPE_5);
3607
3608 uint32_t off;
3609 if (uIst)
3610 off = (uIst - 1) * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, ist1);
3611 else
3612 off = uCpl * sizeof(uint64_t) + RT_UOFFSETOF(X86TSS64, rsp0);
3613 if (off + sizeof(uint64_t) > pVCpu->cpum.GstCtx.tr.u32Limit)
3614 {
3615 Log(("iemRaiseLoadStackFromTss64: out of bounds! uCpl=%d uIst=%d, u32Limit=%#x\n", uCpl, uIst, pVCpu->cpum.GstCtx.tr.u32Limit));
3616 return iemRaiseTaskSwitchFaultCurrentTSS(pVCpu);
3617 }
3618
3619 return iemMemFetchSysU64(pVCpu, puRsp, UINT8_MAX, pVCpu->cpum.GstCtx.tr.u64Base + off);
3620}
3621
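/*
 * Worked example for the 64-bit TSS lookup above (comment only): rsp0 starts
 * at offset 0x04 and ist1 at offset 0x24, both arrays of qwords.  Thus uCpl=2
 * with uIst=0 reads offset 0x14 (rsp2), while uIst=3 reads offset 0x34 (ist3)
 * regardless of the CPL.
 */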
3622
3623/**
3624 * Adjusts the CPU state according to the exception being raised.
3625 *
3626 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3627 * @param u8Vector The exception that has been raised.
3628 */
3629DECLINLINE(void) iemRaiseXcptAdjustState(PVMCPUCC pVCpu, uint8_t u8Vector)
3630{
3631 switch (u8Vector)
3632 {
3633 case X86_XCPT_DB:
3634 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_DR7);
3635 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
3636 break;
3637 /** @todo Read the AMD and Intel exception reference... */
3638 }
3639}
3640
3641
3642/**
3643 * Implements exceptions and interrupts for real mode.
3644 *
3645 * @returns VBox strict status code.
3646 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3647 * @param cbInstr The number of bytes to offset rIP by in the return
3648 * address.
3649 * @param u8Vector The interrupt / exception vector number.
3650 * @param fFlags The flags.
3651 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3652 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3653 */
3654IEM_STATIC VBOXSTRICTRC
3655iemRaiseXcptOrIntInRealMode(PVMCPUCC pVCpu,
3656 uint8_t cbInstr,
3657 uint8_t u8Vector,
3658 uint32_t fFlags,
3659 uint16_t uErr,
3660 uint64_t uCr2)
3661{
3662 NOREF(uErr); NOREF(uCr2);
3663 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3664
3665 /*
3666 * Read the IDT entry.
3667 */
3668 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(4) * u8Vector + 3)
3669 {
3670 Log(("RaiseXcptOrIntInRealMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
3671 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
3672 }
3673 RTFAR16 Idte;
3674 VBOXSTRICTRC rcStrict = iemMemFetchDataU32(pVCpu, (uint32_t *)&Idte, UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(4) * u8Vector);
3675 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3676 {
3677 Log(("iemRaiseXcptOrIntInRealMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
3678 return rcStrict;
3679 }
3680
3681 /*
3682 * Push the stack frame.
3683 */
3684 uint16_t *pu16Frame;
3685 uint64_t uNewRsp;
3686 rcStrict = iemMemStackPushBeginSpecial(pVCpu, 6, (void **)&pu16Frame, &uNewRsp);
3687 if (rcStrict != VINF_SUCCESS)
3688 return rcStrict;
3689
3690 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
3691#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
3692 AssertCompile(IEMTARGETCPU_8086 <= IEMTARGETCPU_186 && IEMTARGETCPU_V20 <= IEMTARGETCPU_186 && IEMTARGETCPU_286 > IEMTARGETCPU_186);
3693 if (pVCpu->iem.s.uTargetCpu <= IEMTARGETCPU_186)
3694 fEfl |= UINT16_C(0xf000);
3695#endif
3696 pu16Frame[2] = (uint16_t)fEfl;
3697 pu16Frame[1] = (uint16_t)pVCpu->cpum.GstCtx.cs.Sel;
3698 pu16Frame[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
3699 rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu16Frame, uNewRsp);
3700 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
3701 return rcStrict;
3702
3703 /*
3704 * Load the vector address into cs:ip and make exception specific state
3705 * adjustments.
3706 */
3707 pVCpu->cpum.GstCtx.cs.Sel = Idte.sel;
3708 pVCpu->cpum.GstCtx.cs.ValidSel = Idte.sel;
3709 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
3710 pVCpu->cpum.GstCtx.cs.u64Base = (uint32_t)Idte.sel << 4;
3711 /** @todo do we load attribs and limit as well? Should we check against limit like far jump? */
3712 pVCpu->cpum.GstCtx.rip = Idte.off;
3713 fEfl &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_AC);
3714 IEMMISC_SET_EFL(pVCpu, fEfl);
3715
3716 /** @todo do we actually do this in real mode? */
3717 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
3718 iemRaiseXcptAdjustState(pVCpu, u8Vector);
3719
3720 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
3721}
3722
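/*
 * Illustrative sketch (comment only) of the real-mode dispatch above: each IVT
 * entry is a 4-byte IP:CS pair at linear address vector * 4, and the handler
 * receives a 6-byte frame of FLAGS, CS and the return IP.  For INT 10h, for
 * instance (with the usual IDT base of zero), the entry is fetched from linear
 * 0x0040, CS:IP is loaded from it, CS.base becomes Idte.sel << 4, and IF, TF
 * and AC are cleared in EFLAGS.
 */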
3723
3724/**
3725 * Loads a NULL data selector into a segment register when coming from V8086 mode.
3726 *
3727 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3728 * @param pSReg Pointer to the segment register.
3729 */
3730IEM_STATIC void iemHlpLoadNullDataSelectorOnV86Xcpt(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
3731{
3732 pSReg->Sel = 0;
3733 pSReg->ValidSel = 0;
3734 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3735 {
3736        /* VT-x (Intel 3960x) doesn't change the base and limit; it clears and sets the following attributes. */
3737 pSReg->Attr.u &= X86DESCATTR_DT | X86DESCATTR_TYPE | X86DESCATTR_DPL | X86DESCATTR_G | X86DESCATTR_D;
3738 pSReg->Attr.u |= X86DESCATTR_UNUSABLE;
3739 }
3740 else
3741 {
3742 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3743 /** @todo check this on AMD-V */
3744 pSReg->u64Base = 0;
3745 pSReg->u32Limit = 0;
3746 }
3747}
3748
3749
3750/**
3751 * Loads a segment selector during a task switch in V8086 mode.
3752 *
3753 * @param pSReg Pointer to the segment register.
3754 * @param uSel The selector value to load.
3755 */
3756IEM_STATIC void iemHlpLoadSelectorInV86Mode(PCPUMSELREG pSReg, uint16_t uSel)
3757{
3758 /* See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3759 pSReg->Sel = uSel;
3760 pSReg->ValidSel = uSel;
3761 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3762 pSReg->u64Base = uSel << 4;
3763 pSReg->u32Limit = 0xffff;
3764 pSReg->Attr.u = 0xf3;
3765}
3766
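/*
 * Worked example for the V8086 loading above (comment only): selectors act as
 * paragraph numbers, so uSel=0x2000 yields base 0x20000, the fixed 64 KiB
 * limit 0xffff, and attributes 0xf3 (present, DPL=3, accessed read/write data).
 */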
3767
3768/**
3769 * Loads a NULL data selector into a selector register, both the hidden and
3770 * visible parts, in protected mode.
3771 *
3772 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3773 * @param pSReg Pointer to the segment register.
3774 * @param uRpl The RPL.
3775 */
3776IEM_STATIC void iemHlpLoadNullDataSelectorProt(PVMCPUCC pVCpu, PCPUMSELREG pSReg, RTSEL uRpl)
3777{
3778    /** @todo Testcase: write a testcase checking what happens when loading a NULL
3779 * data selector in protected mode. */
3780 pSReg->Sel = uRpl;
3781 pSReg->ValidSel = uRpl;
3782 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3783 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3784 {
3785 /* VT-x (Intel 3960x) observed doing something like this. */
3786 pSReg->Attr.u = X86DESCATTR_UNUSABLE | X86DESCATTR_G | X86DESCATTR_D | (pVCpu->iem.s.uCpl << X86DESCATTR_DPL_SHIFT);
3787 pSReg->u32Limit = UINT32_MAX;
3788 pSReg->u64Base = 0;
3789 }
3790 else
3791 {
3792 pSReg->Attr.u = X86DESCATTR_UNUSABLE;
3793 pSReg->u32Limit = 0;
3794 pSReg->u64Base = 0;
3795 }
3796}
3797
3798
3799/**
3800 * Loads a segment selector during a task switch in protected mode.
3801 *
3802 * In this task switch scenario, we would throw \#TS exceptions rather than
3803 * \#GPs.
3804 *
3805 * @returns VBox strict status code.
3806 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3807 * @param pSReg Pointer to the segment register.
3808 * @param uSel The new selector value.
3809 *
3810 * @remarks This does _not_ handle CS or SS.
3811 * @remarks This expects pVCpu->iem.s.uCpl to be up to date.
3812 */
3813IEM_STATIC VBOXSTRICTRC iemHlpTaskSwitchLoadDataSelectorInProtMode(PVMCPUCC pVCpu, PCPUMSELREG pSReg, uint16_t uSel)
3814{
3815 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3816
3817 /* Null data selector. */
3818 if (!(uSel & X86_SEL_MASK_OFF_RPL))
3819 {
3820 iemHlpLoadNullDataSelectorProt(pVCpu, pSReg, uSel);
3821 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3822 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3823 return VINF_SUCCESS;
3824 }
3825
3826 /* Fetch the descriptor. */
3827 IEMSELDESC Desc;
3828 VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_TS);
3829 if (rcStrict != VINF_SUCCESS)
3830 {
3831 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: failed to fetch selector. uSel=%u rc=%Rrc\n", uSel,
3832 VBOXSTRICTRC_VAL(rcStrict)));
3833 return rcStrict;
3834 }
3835
3836 /* Must be a data segment or readable code segment. */
3837 if ( !Desc.Legacy.Gen.u1DescType
3838 || (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
3839 {
3840 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: invalid segment type. uSel=%u Desc.u4Type=%#x\n", uSel,
3841 Desc.Legacy.Gen.u4Type));
3842 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3843 }
3844
3845 /* Check privileges for data segments and non-conforming code segments. */
3846 if ( (Desc.Legacy.Gen.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3847 != (X86_SEL_TYPE_CODE | X86_SEL_TYPE_CONF))
3848 {
3849 /* The RPL and the new CPL must be less than or equal to the DPL. */
3850 if ( (unsigned)(uSel & X86_SEL_RPL) > Desc.Legacy.Gen.u2Dpl
3851 || (pVCpu->iem.s.uCpl > Desc.Legacy.Gen.u2Dpl))
3852 {
3853 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Invalid priv. uSel=%u uSel.RPL=%u DPL=%u CPL=%u\n",
3854 uSel, (uSel & X86_SEL_RPL), Desc.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
3855 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3856 }
3857 }
3858
3859 /* Is it there? */
3860 if (!Desc.Legacy.Gen.u1Present)
3861 {
3862 Log(("iemHlpTaskSwitchLoadDataSelectorInProtMode: Segment not present. uSel=%u\n", uSel));
3863 return iemRaiseSelectorNotPresentWithErr(pVCpu, uSel & X86_SEL_MASK_OFF_RPL);
3864 }
3865
3866 /* The base and limit. */
3867 uint32_t cbLimit = X86DESC_LIMIT_G(&Desc.Legacy);
3868 uint64_t u64Base = X86DESC_BASE(&Desc.Legacy);
3869
3870 /*
3871 * Ok, everything checked out fine. Now set the accessed bit before
3872 * committing the result into the registers.
3873 */
3874 if (!(Desc.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
3875 {
3876 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uSel);
3877 if (rcStrict != VINF_SUCCESS)
3878 return rcStrict;
3879 Desc.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
3880 }
3881
3882 /* Commit */
3883 pSReg->Sel = uSel;
3884 pSReg->Attr.u = X86DESC_GET_HID_ATTR(&Desc.Legacy);
3885 pSReg->u32Limit = cbLimit;
3886 pSReg->u64Base = u64Base; /** @todo testcase/investigate: seen claims that the upper half of the base remains unchanged... */
3887 pSReg->ValidSel = uSel;
3888 pSReg->fFlags = CPUMSELREG_FLAGS_VALID;
3889 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
3890 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE;
3891
3892 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
3893 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
3894 return VINF_SUCCESS;
3895}
3896
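/*
 * Worked example for the privilege check above (comment only): loading DS with
 * a selector whose RPL is 3 while the descriptor DPL is 2 (and the new CPL is
 * 2) violates the "RPL and CPL must be <= DPL" rule for data and non-conforming
 * code segments, so the helper raises #TS with the selector (RPL bits cleared)
 * as the error code.
 */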
3897
3898/**
3899 * Performs a task switch.
3900 *
3901 * If the task switch is the result of a JMP, CALL or IRET instruction, the
3902 * caller is responsible for performing the necessary checks (like DPL, TSS
3903 * present etc.) which are specific to JMP/CALL/IRET. See Intel Instruction
3904 * reference for JMP, CALL, IRET.
3905 *
3906 * If the task switch is due to a software interrupt or hardware exception,
3907 * the caller is responsible for validating the TSS selector and descriptor. See
3908 * Intel Instruction reference for INT n.
3909 *
3910 * @returns VBox strict status code.
3911 * @param pVCpu The cross context virtual CPU structure of the calling thread.
3912 * @param enmTaskSwitch The cause of the task switch.
3913 * @param uNextEip The EIP effective after the task switch.
3914 * @param fFlags The flags, see IEM_XCPT_FLAGS_XXX.
3915 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
3916 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
3917 * @param SelTSS The TSS selector of the new task.
3918 * @param pNewDescTSS Pointer to the new TSS descriptor.
3919 */
3920IEM_STATIC VBOXSTRICTRC
3921iemTaskSwitch(PVMCPUCC pVCpu,
3922 IEMTASKSWITCH enmTaskSwitch,
3923 uint32_t uNextEip,
3924 uint32_t fFlags,
3925 uint16_t uErr,
3926 uint64_t uCr2,
3927 RTSEL SelTSS,
3928 PIEMSELDESC pNewDescTSS)
3929{
3930 Assert(!IEM_IS_REAL_MODE(pVCpu));
3931 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
3932 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
3933
3934 uint32_t const uNewTSSType = pNewDescTSS->Legacy.Gate.u4Type;
3935 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_AVAIL
3936 || uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
3937 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3938 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3939
3940 bool const fIsNewTSS386 = ( uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_AVAIL
3941 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
3942
3943 Log(("iemTaskSwitch: enmTaskSwitch=%u NewTSS=%#x fIsNewTSS386=%RTbool EIP=%#RX32 uNextEip=%#RX32\n", enmTaskSwitch, SelTSS,
3944 fIsNewTSS386, pVCpu->cpum.GstCtx.eip, uNextEip));
3945
3946 /* Update CR2 in case it's a page-fault. */
3947 /** @todo This should probably be done much earlier in IEM/PGM. See
3948 * @bugref{5653#c49}. */
3949 if (fFlags & IEM_XCPT_FLAGS_CR2)
3950 pVCpu->cpum.GstCtx.cr2 = uCr2;
3951
3952 /*
3953 * Check the new TSS limit. See Intel spec. 6.15 "Exception and Interrupt Reference"
3954 * subsection "Interrupt 10 - Invalid TSS Exception (#TS)".
3955 */
3956 uint32_t const uNewTSSLimit = pNewDescTSS->Legacy.Gen.u16LimitLow | (pNewDescTSS->Legacy.Gen.u4LimitHigh << 16);
3957 uint32_t const uNewTSSLimitMin = fIsNewTSS386 ? X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN : X86_SEL_TYPE_SYS_286_TSS_LIMIT_MIN;
3958 if (uNewTSSLimit < uNewTSSLimitMin)
3959 {
3960 Log(("iemTaskSwitch: Invalid new TSS limit. enmTaskSwitch=%u uNewTSSLimit=%#x uNewTSSLimitMin=%#x -> #TS\n",
3961 enmTaskSwitch, uNewTSSLimit, uNewTSSLimitMin));
3962 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
3963 }
3964
3965 /*
3966     * Task switches in VMX non-root mode always cause a task-switch VM-exit.
3967 * The new TSS must have been read and validated (DPL, limits etc.) before a
3968 * task-switch VM-exit commences.
3969 *
3970 * See Intel spec. 25.4.2 "Treatment of Task Switches".
3971 */
3972 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
3973 {
3974 Log(("iemTaskSwitch: Guest intercept (source=%u, sel=%#x) -> VM-exit.\n", enmTaskSwitch, SelTSS));
3975 IEM_VMX_VMEXIT_TASK_SWITCH_RET(pVCpu, enmTaskSwitch, SelTSS, uNextEip - pVCpu->cpum.GstCtx.eip);
3976 }
3977
3978 /*
3979 * The SVM nested-guest intercept for task-switch takes priority over all exceptions
3980 * after validating the incoming (new) TSS, see AMD spec. 15.14.1 "Task Switch Intercept".
3981 */
3982 if (IEM_SVM_IS_CTRL_INTERCEPT_SET(pVCpu, SVM_CTRL_INTERCEPT_TASK_SWITCH))
3983 {
3984 uint32_t const uExitInfo1 = SelTSS;
3985 uint32_t uExitInfo2 = uErr;
3986 switch (enmTaskSwitch)
3987 {
3988 case IEMTASKSWITCH_JUMP: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_JUMP; break;
3989 case IEMTASKSWITCH_IRET: uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_IRET; break;
3990 default: break;
3991 }
3992 if (fFlags & IEM_XCPT_FLAGS_ERR)
3993 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_HAS_ERROR_CODE;
3994 if (pVCpu->cpum.GstCtx.eflags.Bits.u1RF)
3995 uExitInfo2 |= SVM_EXIT2_TASK_SWITCH_EFLAGS_RF;
3996
3997 Log(("iemTaskSwitch: Guest intercept -> #VMEXIT. uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uExitInfo1, uExitInfo2));
3998 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_TASK_SWITCH, uExitInfo1, uExitInfo2);
3999 RT_NOREF2(uExitInfo1, uExitInfo2);
4000 }
4001
4002 /*
4003     * Check the current TSS limit. The last bytes written to the current TSS during the
4004     * task switch are 2 bytes at offset 0x5C (32-bit) and 1 byte at offset 0x28 (16-bit).
4005 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4006 *
4007     * The AMD docs don't mention anything about limit checks with LTR, which suggests you can
4008 * end up with smaller than "legal" TSS limits.
4009 */
4010 uint32_t const uCurTSSLimit = pVCpu->cpum.GstCtx.tr.u32Limit;
4011 uint32_t const uCurTSSLimitMin = fIsNewTSS386 ? 0x5F : 0x29;
4012 if (uCurTSSLimit < uCurTSSLimitMin)
4013 {
4014 Log(("iemTaskSwitch: Invalid current TSS limit. enmTaskSwitch=%u uCurTSSLimit=%#x uCurTSSLimitMin=%#x -> #TS\n",
4015 enmTaskSwitch, uCurTSSLimit, uCurTSSLimitMin));
4016 return iemRaiseTaskSwitchFaultWithErr(pVCpu, SelTSS & X86_SEL_MASK_OFF_RPL);
4017 }
4018
4019 /*
4020 * Verify that the new TSS can be accessed and map it. Map only the required contents
4021 * and not the entire TSS.
4022 */
4023 void *pvNewTSS;
4024 uint32_t const cbNewTSS = uNewTSSLimitMin + 1;
4025 RTGCPTR const GCPtrNewTSS = X86DESC_BASE(&pNewDescTSS->Legacy);
4026 AssertCompile(sizeof(X86TSS32) == X86_SEL_TYPE_SYS_386_TSS_LIMIT_MIN + 1);
4027 /** @todo Handle if the TSS crosses a page boundary. Intel specifies that it may
4028 * not perform correct translation if this happens. See Intel spec. 7.2.1
4029 * "Task-State Segment". */
4030 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, &pvNewTSS, cbNewTSS, UINT8_MAX, GCPtrNewTSS, IEM_ACCESS_SYS_RW);
4031 if (rcStrict != VINF_SUCCESS)
4032 {
4033 Log(("iemTaskSwitch: Failed to read new TSS. enmTaskSwitch=%u cbNewTSS=%u uNewTSSLimit=%u rc=%Rrc\n", enmTaskSwitch,
4034 cbNewTSS, uNewTSSLimit, VBOXSTRICTRC_VAL(rcStrict)));
4035 return rcStrict;
4036 }
4037
4038 /*
4039 * Clear the busy bit in current task's TSS descriptor if it's a task switch due to JMP/IRET.
4040 */
4041 uint32_t u32EFlags = pVCpu->cpum.GstCtx.eflags.u32;
4042 if ( enmTaskSwitch == IEMTASKSWITCH_JUMP
4043 || enmTaskSwitch == IEMTASKSWITCH_IRET)
4044 {
4045 PX86DESC pDescCurTSS;
4046 rcStrict = iemMemMap(pVCpu, (void **)&pDescCurTSS, sizeof(*pDescCurTSS), UINT8_MAX,
4047 pVCpu->cpum.GstCtx.gdtr.pGdt + (pVCpu->cpum.GstCtx.tr.Sel & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4048 if (rcStrict != VINF_SUCCESS)
4049 {
4050            Log(("iemTaskSwitch: Failed to read current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4051 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4052 return rcStrict;
4053 }
4054
4055 pDescCurTSS->Gate.u4Type &= ~X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4056 rcStrict = iemMemCommitAndUnmap(pVCpu, pDescCurTSS, IEM_ACCESS_SYS_RW);
4057 if (rcStrict != VINF_SUCCESS)
4058 {
4059            Log(("iemTaskSwitch: Failed to commit current TSS descriptor in GDT. enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4060 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4061 return rcStrict;
4062 }
4063
4064 /* Clear EFLAGS.NT (Nested Task) in the eflags memory image, if it's a task switch due to an IRET. */
4065 if (enmTaskSwitch == IEMTASKSWITCH_IRET)
4066 {
4067 Assert( uNewTSSType == X86_SEL_TYPE_SYS_286_TSS_BUSY
4068 || uNewTSSType == X86_SEL_TYPE_SYS_386_TSS_BUSY);
4069 u32EFlags &= ~X86_EFL_NT;
4070 }
4071 }
4072
4073 /*
4074 * Save the CPU state into the current TSS.
4075 */
4076 RTGCPTR const GCPtrCurTSS = pVCpu->cpum.GstCtx.tr.u64Base;
4077 if (GCPtrNewTSS == GCPtrCurTSS)
4078 {
4079 Log(("iemTaskSwitch: Switching to the same TSS! enmTaskSwitch=%u GCPtr[Cur|New]TSS=%#RGv\n", enmTaskSwitch, GCPtrCurTSS));
4080 Log(("uCurCr3=%#x uCurEip=%#x uCurEflags=%#x uCurEax=%#x uCurEsp=%#x uCurEbp=%#x uCurCS=%#04x uCurSS=%#04x uCurLdt=%#x\n",
4081 pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.u32, pVCpu->cpum.GstCtx.eax,
4082 pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel,
4083 pVCpu->cpum.GstCtx.ldtr.Sel));
4084 }
4085 if (fIsNewTSS386)
4086 {
4087 /*
4088 * Verify that the current TSS (32-bit) can be accessed, only the minimum required size.
4089 * See Intel spec. 7.2.1 "Task-State Segment (TSS)" for static and dynamic fields.
4090 */
4091 void *pvCurTSS32;
4092 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS32, eip);
4093 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS32, selLdt) - RT_UOFFSETOF(X86TSS32, eip);
4094 AssertCompile(RTASSERT_OFFSET_OF(X86TSS32, selLdt) - RTASSERT_OFFSET_OF(X86TSS32, eip) == 64);
4095 rcStrict = iemMemMap(pVCpu, &pvCurTSS32, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4096 if (rcStrict != VINF_SUCCESS)
4097 {
4098 Log(("iemTaskSwitch: Failed to read current 32-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4099 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4100 return rcStrict;
4101 }
4102
4103        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4104 PX86TSS32 pCurTSS32 = (PX86TSS32)((uintptr_t)pvCurTSS32 - offCurTSS);
4105 pCurTSS32->eip = uNextEip;
4106 pCurTSS32->eflags = u32EFlags;
4107 pCurTSS32->eax = pVCpu->cpum.GstCtx.eax;
4108 pCurTSS32->ecx = pVCpu->cpum.GstCtx.ecx;
4109 pCurTSS32->edx = pVCpu->cpum.GstCtx.edx;
4110 pCurTSS32->ebx = pVCpu->cpum.GstCtx.ebx;
4111 pCurTSS32->esp = pVCpu->cpum.GstCtx.esp;
4112 pCurTSS32->ebp = pVCpu->cpum.GstCtx.ebp;
4113 pCurTSS32->esi = pVCpu->cpum.GstCtx.esi;
4114 pCurTSS32->edi = pVCpu->cpum.GstCtx.edi;
4115 pCurTSS32->es = pVCpu->cpum.GstCtx.es.Sel;
4116 pCurTSS32->cs = pVCpu->cpum.GstCtx.cs.Sel;
4117 pCurTSS32->ss = pVCpu->cpum.GstCtx.ss.Sel;
4118 pCurTSS32->ds = pVCpu->cpum.GstCtx.ds.Sel;
4119 pCurTSS32->fs = pVCpu->cpum.GstCtx.fs.Sel;
4120 pCurTSS32->gs = pVCpu->cpum.GstCtx.gs.Sel;
4121
4122 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS32, IEM_ACCESS_SYS_RW);
4123 if (rcStrict != VINF_SUCCESS)
4124 {
4125 Log(("iemTaskSwitch: Failed to commit current 32-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4126 VBOXSTRICTRC_VAL(rcStrict)));
4127 return rcStrict;
4128 }
4129 }
4130 else
4131 {
4132 /*
4133 * Verify that the current TSS (16-bit) can be accessed. Again, only the minimum required size.
4134 */
4135 void *pvCurTSS16;
4136 uint32_t const offCurTSS = RT_UOFFSETOF(X86TSS16, ip);
4137 uint32_t const cbCurTSS = RT_UOFFSETOF(X86TSS16, selLdt) - RT_UOFFSETOF(X86TSS16, ip);
4138 AssertCompile(RTASSERT_OFFSET_OF(X86TSS16, selLdt) - RTASSERT_OFFSET_OF(X86TSS16, ip) == 28);
4139 rcStrict = iemMemMap(pVCpu, &pvCurTSS16, cbCurTSS, UINT8_MAX, GCPtrCurTSS + offCurTSS, IEM_ACCESS_SYS_RW);
4140 if (rcStrict != VINF_SUCCESS)
4141 {
4142 Log(("iemTaskSwitch: Failed to read current 16-bit TSS. enmTaskSwitch=%u GCPtrCurTSS=%#RGv cb=%u rc=%Rrc\n",
4143 enmTaskSwitch, GCPtrCurTSS, cbCurTSS, VBOXSTRICTRC_VAL(rcStrict)));
4144 return rcStrict;
4145 }
4146
4147        /* !! WARNING !! Access -only- the members (dynamic fields) that are mapped, i.e. the interval [offCurTSS, offCurTSS + cbCurTSS). */
4148 PX86TSS16 pCurTSS16 = (PX86TSS16)((uintptr_t)pvCurTSS16 - offCurTSS);
4149 pCurTSS16->ip = uNextEip;
4150 pCurTSS16->flags = u32EFlags;
4151 pCurTSS16->ax = pVCpu->cpum.GstCtx.ax;
4152 pCurTSS16->cx = pVCpu->cpum.GstCtx.cx;
4153 pCurTSS16->dx = pVCpu->cpum.GstCtx.dx;
4154 pCurTSS16->bx = pVCpu->cpum.GstCtx.bx;
4155 pCurTSS16->sp = pVCpu->cpum.GstCtx.sp;
4156 pCurTSS16->bp = pVCpu->cpum.GstCtx.bp;
4157 pCurTSS16->si = pVCpu->cpum.GstCtx.si;
4158 pCurTSS16->di = pVCpu->cpum.GstCtx.di;
4159 pCurTSS16->es = pVCpu->cpum.GstCtx.es.Sel;
4160 pCurTSS16->cs = pVCpu->cpum.GstCtx.cs.Sel;
4161 pCurTSS16->ss = pVCpu->cpum.GstCtx.ss.Sel;
4162 pCurTSS16->ds = pVCpu->cpum.GstCtx.ds.Sel;
4163
4164 rcStrict = iemMemCommitAndUnmap(pVCpu, pvCurTSS16, IEM_ACCESS_SYS_RW);
4165 if (rcStrict != VINF_SUCCESS)
4166 {
4167 Log(("iemTaskSwitch: Failed to commit current 16-bit TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch,
4168 VBOXSTRICTRC_VAL(rcStrict)));
4169 return rcStrict;
4170 }
4171 }
4172
4173 /*
4174 * Update the previous task link field for the new TSS, if the task switch is due to a CALL/INT_XCPT.
4175 */
4176 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4177 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4178 {
4179        /* 16 or 32-bit TSS doesn't matter; we only access the first, common 16-bit field (selPrev) here. */
4180 PX86TSS32 pNewTSS = (PX86TSS32)pvNewTSS;
4181 pNewTSS->selPrev = pVCpu->cpum.GstCtx.tr.Sel;
4182 }
4183
4184 /*
4185     * Read the state from the new TSS into temporaries. Setting it immediately as the new CPU state is tricky;
4186 * it's done further below with error handling (e.g. CR3 changes will go through PGM).
4187 */
4188 uint32_t uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEcx, uNewEdx, uNewEbx, uNewEsp, uNewEbp, uNewEsi, uNewEdi;
4189 uint16_t uNewES, uNewCS, uNewSS, uNewDS, uNewFS, uNewGS, uNewLdt;
4190 bool fNewDebugTrap;
4191 if (fIsNewTSS386)
4192 {
4193 PCX86TSS32 pNewTSS32 = (PCX86TSS32)pvNewTSS;
4194 uNewCr3 = (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG) ? pNewTSS32->cr3 : 0;
4195 uNewEip = pNewTSS32->eip;
4196 uNewEflags = pNewTSS32->eflags;
4197 uNewEax = pNewTSS32->eax;
4198 uNewEcx = pNewTSS32->ecx;
4199 uNewEdx = pNewTSS32->edx;
4200 uNewEbx = pNewTSS32->ebx;
4201 uNewEsp = pNewTSS32->esp;
4202 uNewEbp = pNewTSS32->ebp;
4203 uNewEsi = pNewTSS32->esi;
4204 uNewEdi = pNewTSS32->edi;
4205 uNewES = pNewTSS32->es;
4206 uNewCS = pNewTSS32->cs;
4207 uNewSS = pNewTSS32->ss;
4208 uNewDS = pNewTSS32->ds;
4209 uNewFS = pNewTSS32->fs;
4210 uNewGS = pNewTSS32->gs;
4211 uNewLdt = pNewTSS32->selLdt;
4212 fNewDebugTrap = RT_BOOL(pNewTSS32->fDebugTrap);
4213 }
4214 else
4215 {
4216 PCX86TSS16 pNewTSS16 = (PCX86TSS16)pvNewTSS;
4217 uNewCr3 = 0;
4218 uNewEip = pNewTSS16->ip;
4219 uNewEflags = pNewTSS16->flags;
4220 uNewEax = UINT32_C(0xffff0000) | pNewTSS16->ax;
4221 uNewEcx = UINT32_C(0xffff0000) | pNewTSS16->cx;
4222 uNewEdx = UINT32_C(0xffff0000) | pNewTSS16->dx;
4223 uNewEbx = UINT32_C(0xffff0000) | pNewTSS16->bx;
4224 uNewEsp = UINT32_C(0xffff0000) | pNewTSS16->sp;
4225 uNewEbp = UINT32_C(0xffff0000) | pNewTSS16->bp;
4226 uNewEsi = UINT32_C(0xffff0000) | pNewTSS16->si;
4227 uNewEdi = UINT32_C(0xffff0000) | pNewTSS16->di;
4228 uNewES = pNewTSS16->es;
4229 uNewCS = pNewTSS16->cs;
4230 uNewSS = pNewTSS16->ss;
4231 uNewDS = pNewTSS16->ds;
4232 uNewFS = 0;
4233 uNewGS = 0;
4234 uNewLdt = pNewTSS16->selLdt;
4235 fNewDebugTrap = false;
4236 }
4237
4238 if (GCPtrNewTSS == GCPtrCurTSS)
4239 Log(("uNewCr3=%#x uNewEip=%#x uNewEflags=%#x uNewEax=%#x uNewEsp=%#x uNewEbp=%#x uNewCS=%#04x uNewSS=%#04x uNewLdt=%#x\n",
4240 uNewCr3, uNewEip, uNewEflags, uNewEax, uNewEsp, uNewEbp, uNewCS, uNewSS, uNewLdt));
4241
4242 /*
4243 * We're done accessing the new TSS.
4244 */
4245 rcStrict = iemMemCommitAndUnmap(pVCpu, pvNewTSS, IEM_ACCESS_SYS_RW);
4246 if (rcStrict != VINF_SUCCESS)
4247 {
4248 Log(("iemTaskSwitch: Failed to commit new TSS. enmTaskSwitch=%u rc=%Rrc\n", enmTaskSwitch, VBOXSTRICTRC_VAL(rcStrict)));
4249 return rcStrict;
4250 }
4251
4252 /*
4253 * Set the busy bit in the new TSS descriptor, if the task switch is a JMP/CALL/INT_XCPT.
4254 */
4255 if (enmTaskSwitch != IEMTASKSWITCH_IRET)
4256 {
4257 rcStrict = iemMemMap(pVCpu, (void **)&pNewDescTSS, sizeof(*pNewDescTSS), UINT8_MAX,
4258 pVCpu->cpum.GstCtx.gdtr.pGdt + (SelTSS & X86_SEL_MASK), IEM_ACCESS_SYS_RW);
4259 if (rcStrict != VINF_SUCCESS)
4260 {
4261 Log(("iemTaskSwitch: Failed to read new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4262 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4263 return rcStrict;
4264 }
4265
4266 /* Check that the descriptor indicates the new TSS is available (not busy). */
4267 AssertMsg( pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_286_TSS_AVAIL
4268 || pNewDescTSS->Legacy.Gate.u4Type == X86_SEL_TYPE_SYS_386_TSS_AVAIL,
4269 ("Invalid TSS descriptor type=%#x", pNewDescTSS->Legacy.Gate.u4Type));
4270
4271 pNewDescTSS->Legacy.Gate.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4272 rcStrict = iemMemCommitAndUnmap(pVCpu, pNewDescTSS, IEM_ACCESS_SYS_RW);
4273 if (rcStrict != VINF_SUCCESS)
4274 {
4275 Log(("iemTaskSwitch: Failed to commit new TSS descriptor in GDT (2). enmTaskSwitch=%u pGdt=%#RX64 rc=%Rrc\n",
4276 enmTaskSwitch, pVCpu->cpum.GstCtx.gdtr.pGdt, VBOXSTRICTRC_VAL(rcStrict)));
4277 return rcStrict;
4278 }
4279 }
4280
4281 /*
4282 * From this point on, we're technically in the new task. We will defer exceptions
4283 * until the completion of the task switch but before executing any instructions in the new task.
4284 */
4285 pVCpu->cpum.GstCtx.tr.Sel = SelTSS;
4286 pVCpu->cpum.GstCtx.tr.ValidSel = SelTSS;
4287 pVCpu->cpum.GstCtx.tr.fFlags = CPUMSELREG_FLAGS_VALID;
4288 pVCpu->cpum.GstCtx.tr.Attr.u = X86DESC_GET_HID_ATTR(&pNewDescTSS->Legacy);
4289 pVCpu->cpum.GstCtx.tr.u32Limit = X86DESC_LIMIT_G(&pNewDescTSS->Legacy);
4290 pVCpu->cpum.GstCtx.tr.u64Base = X86DESC_BASE(&pNewDescTSS->Legacy);
4291 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_TR);
4292
4293 /* Set the busy bit in TR. */
4294 pVCpu->cpum.GstCtx.tr.Attr.n.u4Type |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
4295
4296 /* Set EFLAGS.NT (Nested Task) in the eflags loaded from the new TSS, if it's a task switch due to a CALL/INT_XCPT. */
4297 if ( enmTaskSwitch == IEMTASKSWITCH_CALL
4298 || enmTaskSwitch == IEMTASKSWITCH_INT_XCPT)
4299 {
4300 uNewEflags |= X86_EFL_NT;
4301 }
4302
4303 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_LE_ALL; /** @todo Should we clear DR7.LE bit too? */
4304 pVCpu->cpum.GstCtx.cr0 |= X86_CR0_TS;
4305 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR0);
4306
4307 pVCpu->cpum.GstCtx.eip = uNewEip;
4308 pVCpu->cpum.GstCtx.eax = uNewEax;
4309 pVCpu->cpum.GstCtx.ecx = uNewEcx;
4310 pVCpu->cpum.GstCtx.edx = uNewEdx;
4311 pVCpu->cpum.GstCtx.ebx = uNewEbx;
4312 pVCpu->cpum.GstCtx.esp = uNewEsp;
4313 pVCpu->cpum.GstCtx.ebp = uNewEbp;
4314 pVCpu->cpum.GstCtx.esi = uNewEsi;
4315 pVCpu->cpum.GstCtx.edi = uNewEdi;
4316
4317 uNewEflags &= X86_EFL_LIVE_MASK;
4318 uNewEflags |= X86_EFL_RA1_MASK;
4319 IEMMISC_SET_EFL(pVCpu, uNewEflags);
4320
4321 /*
4322 * Switch the selectors here and do the segment checks later. If we throw exceptions, the selectors
4323 * will be valid in the exception handler. We cannot update the hidden parts until we've switched CR3
4324     * because the hidden part data originates from the guest LDT/GDT, which is accessed through paging.
4325 */
4326 pVCpu->cpum.GstCtx.es.Sel = uNewES;
4327 pVCpu->cpum.GstCtx.es.Attr.u &= ~X86DESCATTR_P;
4328
4329 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4330 pVCpu->cpum.GstCtx.cs.Attr.u &= ~X86DESCATTR_P;
4331
4332 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4333 pVCpu->cpum.GstCtx.ss.Attr.u &= ~X86DESCATTR_P;
4334
4335 pVCpu->cpum.GstCtx.ds.Sel = uNewDS;
4336 pVCpu->cpum.GstCtx.ds.Attr.u &= ~X86DESCATTR_P;
4337
4338 pVCpu->cpum.GstCtx.fs.Sel = uNewFS;
4339 pVCpu->cpum.GstCtx.fs.Attr.u &= ~X86DESCATTR_P;
4340
4341 pVCpu->cpum.GstCtx.gs.Sel = uNewGS;
4342 pVCpu->cpum.GstCtx.gs.Attr.u &= ~X86DESCATTR_P;
4343 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_HIDDEN_SEL_REGS);
4344
4345 pVCpu->cpum.GstCtx.ldtr.Sel = uNewLdt;
4346 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_STALE;
4347 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_P;
4348 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR);
4349
4350 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4351 {
4352 pVCpu->cpum.GstCtx.es.Attr.u |= X86DESCATTR_UNUSABLE;
4353 pVCpu->cpum.GstCtx.cs.Attr.u |= X86DESCATTR_UNUSABLE;
4354 pVCpu->cpum.GstCtx.ss.Attr.u |= X86DESCATTR_UNUSABLE;
4355 pVCpu->cpum.GstCtx.ds.Attr.u |= X86DESCATTR_UNUSABLE;
4356 pVCpu->cpum.GstCtx.fs.Attr.u |= X86DESCATTR_UNUSABLE;
4357 pVCpu->cpum.GstCtx.gs.Attr.u |= X86DESCATTR_UNUSABLE;
4358 pVCpu->cpum.GstCtx.ldtr.Attr.u |= X86DESCATTR_UNUSABLE;
4359 }
4360
4361 /*
4362 * Switch CR3 for the new task.
4363 */
4364 if ( fIsNewTSS386
4365 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PG))
4366 {
4367 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */
4368 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3);
4369 AssertRCSuccessReturn(rc, rc);
4370
4371 /* Inform PGM. */
4372 /** @todo Should we raise \#GP(0) here when PAE PDPEs are invalid? */
4373 rc = PGMFlushTLB(pVCpu, pVCpu->cpum.GstCtx.cr3, !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_PGE));
4374 AssertRCReturn(rc, rc);
4375 /* ignore informational status codes */
4376
4377 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3);
4378 }
4379
4380 /*
4381 * Switch LDTR for the new task.
4382 */
4383 if (!(uNewLdt & X86_SEL_MASK_OFF_RPL))
4384 iemHlpLoadNullDataSelectorProt(pVCpu, &pVCpu->cpum.GstCtx.ldtr, uNewLdt);
4385 else
4386 {
4387 Assert(!pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present); /* Ensures that LDT.TI check passes in iemMemFetchSelDesc() below. */
4388
4389 IEMSELDESC DescNewLdt;
4390 rcStrict = iemMemFetchSelDesc(pVCpu, &DescNewLdt, uNewLdt, X86_XCPT_TS);
4391 if (rcStrict != VINF_SUCCESS)
4392 {
4393 Log(("iemTaskSwitch: fetching LDT failed. enmTaskSwitch=%u uNewLdt=%u cbGdt=%u rc=%Rrc\n", enmTaskSwitch,
4394 uNewLdt, pVCpu->cpum.GstCtx.gdtr.cbGdt, VBOXSTRICTRC_VAL(rcStrict)));
4395 return rcStrict;
4396 }
4397 if ( !DescNewLdt.Legacy.Gen.u1Present
4398 || DescNewLdt.Legacy.Gen.u1DescType
4399 || DescNewLdt.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_LDT)
4400 {
4401 Log(("iemTaskSwitch: Invalid LDT. enmTaskSwitch=%u uNewLdt=%u DescNewLdt.Legacy.u=%#RX64 -> #TS\n", enmTaskSwitch,
4402 uNewLdt, DescNewLdt.Legacy.u));
4403 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL);
4404 }
4405
4406 pVCpu->cpum.GstCtx.ldtr.ValidSel = uNewLdt;
4407 pVCpu->cpum.GstCtx.ldtr.fFlags = CPUMSELREG_FLAGS_VALID;
4408 pVCpu->cpum.GstCtx.ldtr.u64Base = X86DESC_BASE(&DescNewLdt.Legacy);
4409 pVCpu->cpum.GstCtx.ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy);
4410 pVCpu->cpum.GstCtx.ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy);
4411 if (IEM_IS_GUEST_CPU_INTEL(pVCpu))
4412 pVCpu->cpum.GstCtx.ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE;
4413 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ldtr));
4414 }
4415
4416 IEMSELDESC DescSS;
4417 if (IEM_IS_V86_MODE(pVCpu))
4418 {
4419 pVCpu->iem.s.uCpl = 3;
4420 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.es, uNewES);
4421 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.cs, uNewCS);
4422 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ss, uNewSS);
4423 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.ds, uNewDS);
4424 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.fs, uNewFS);
4425 iemHlpLoadSelectorInV86Mode(&pVCpu->cpum.GstCtx.gs, uNewGS);
4426
4427 /* Quick fix: fake DescSS. */ /** @todo fix the code further down? */
4428 DescSS.Legacy.u = 0;
4429 DescSS.Legacy.Gen.u16LimitLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u32Limit;
4430 DescSS.Legacy.Gen.u4LimitHigh = pVCpu->cpum.GstCtx.ss.u32Limit >> 16;
4431 DescSS.Legacy.Gen.u16BaseLow = (uint16_t)pVCpu->cpum.GstCtx.ss.u64Base;
4432 DescSS.Legacy.Gen.u8BaseHigh1 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 16);
4433 DescSS.Legacy.Gen.u8BaseHigh2 = (uint8_t)(pVCpu->cpum.GstCtx.ss.u64Base >> 24);
4434 DescSS.Legacy.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
4435 DescSS.Legacy.Gen.u2Dpl = 3;
4436 }
4437 else
4438 {
4439 uint8_t const uNewCpl = (uNewCS & X86_SEL_RPL);
4440
4441 /*
4442 * Load the stack segment for the new task.
4443 */
4444 if (!(uNewSS & X86_SEL_MASK_OFF_RPL))
4445 {
4446 Log(("iemTaskSwitch: Null stack segment. enmTaskSwitch=%u uNewSS=%#x -> #TS\n", enmTaskSwitch, uNewSS));
4447 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4448 }
4449
4450 /* Fetch the descriptor. */
4451 rcStrict = iemMemFetchSelDesc(pVCpu, &DescSS, uNewSS, X86_XCPT_TS);
4452 if (rcStrict != VINF_SUCCESS)
4453 {
4454 Log(("iemTaskSwitch: failed to fetch SS. uNewSS=%#x rc=%Rrc\n", uNewSS,
4455 VBOXSTRICTRC_VAL(rcStrict)));
4456 return rcStrict;
4457 }
4458
4459 /* SS must be a data segment and writable. */
4460 if ( !DescSS.Legacy.Gen.u1DescType
4461 || (DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE)
4462 || !(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_WRITE))
4463 {
4464 Log(("iemTaskSwitch: SS invalid descriptor type. uNewSS=%#x u1DescType=%u u4Type=%#x\n",
4465 uNewSS, DescSS.Legacy.Gen.u1DescType, DescSS.Legacy.Gen.u4Type));
4466 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4467 }
4468
4469 /* The SS.RPL, SS.DPL, CS.RPL (CPL) must be equal. */
4470 if ( (uNewSS & X86_SEL_RPL) != uNewCpl
4471 || DescSS.Legacy.Gen.u2Dpl != uNewCpl)
4472 {
4473 Log(("iemTaskSwitch: Invalid priv. for SS. uNewSS=%#x SS.DPL=%u uNewCpl=%u -> #TS\n", uNewSS, DescSS.Legacy.Gen.u2Dpl,
4474 uNewCpl));
4475 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4476 }
4477
4478 /* Is it there? */
4479 if (!DescSS.Legacy.Gen.u1Present)
4480 {
4481 Log(("iemTaskSwitch: SS not present. uNewSS=%#x -> #NP\n", uNewSS));
4482 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewSS & X86_SEL_MASK_OFF_RPL);
4483 }
4484
4485 uint32_t cbLimit = X86DESC_LIMIT_G(&DescSS.Legacy);
4486 uint64_t u64Base = X86DESC_BASE(&DescSS.Legacy);
4487
4488 /* Set the accessed bit before committing the result into SS. */
4489 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4490 {
4491 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewSS);
4492 if (rcStrict != VINF_SUCCESS)
4493 return rcStrict;
4494 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4495 }
4496
4497 /* Commit SS. */
4498 pVCpu->cpum.GstCtx.ss.Sel = uNewSS;
4499 pVCpu->cpum.GstCtx.ss.ValidSel = uNewSS;
4500 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
4501 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimit;
4502 pVCpu->cpum.GstCtx.ss.u64Base = u64Base;
4503 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
4504 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
4505
4506 /* CPL has changed, update IEM before loading rest of segments. */
4507 pVCpu->iem.s.uCpl = uNewCpl;
4508
4509 /*
4510 * Load the data segments for the new task.
4511 */
4512 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.es, uNewES);
4513 if (rcStrict != VINF_SUCCESS)
4514 return rcStrict;
4515 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.ds, uNewDS);
4516 if (rcStrict != VINF_SUCCESS)
4517 return rcStrict;
4518 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.fs, uNewFS);
4519 if (rcStrict != VINF_SUCCESS)
4520 return rcStrict;
4521 rcStrict = iemHlpTaskSwitchLoadDataSelectorInProtMode(pVCpu, &pVCpu->cpum.GstCtx.gs, uNewGS);
4522 if (rcStrict != VINF_SUCCESS)
4523 return rcStrict;
4524
4525 /*
4526 * Load the code segment for the new task.
4527 */
4528 if (!(uNewCS & X86_SEL_MASK_OFF_RPL))
4529 {
4530 Log(("iemTaskSwitch #TS: Null code segment. enmTaskSwitch=%u uNewCS=%#x\n", enmTaskSwitch, uNewCS));
4531 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4532 }
4533
4534 /* Fetch the descriptor. */
4535 IEMSELDESC DescCS;
4536 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, uNewCS, X86_XCPT_TS);
4537 if (rcStrict != VINF_SUCCESS)
4538 {
4539 Log(("iemTaskSwitch: failed to fetch CS. uNewCS=%u rc=%Rrc\n", uNewCS, VBOXSTRICTRC_VAL(rcStrict)));
4540 return rcStrict;
4541 }
4542
4543 /* CS must be a code segment. */
4544 if ( !DescCS.Legacy.Gen.u1DescType
4545 || !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4546 {
4547 Log(("iemTaskSwitch: CS invalid descriptor type. uNewCS=%#x u1DescType=%u u4Type=%#x -> #TS\n", uNewCS,
4548 DescCS.Legacy.Gen.u1DescType, DescCS.Legacy.Gen.u4Type));
4549 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4550 }
4551
4552 /* For conforming CS, DPL must be less than or equal to the RPL. */
4553 if ( (DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4554 && DescCS.Legacy.Gen.u2Dpl > (uNewCS & X86_SEL_RPL))
4555 {
4556        Log(("iemTaskSwitch: conforming CS DPL > RPL. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS, DescCS.Legacy.Gen.u4Type,
4557 DescCS.Legacy.Gen.u2Dpl));
4558 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4559 }
4560
4561 /* For non-conforming CS, DPL must match RPL. */
4562 if ( !(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF)
4563 && DescCS.Legacy.Gen.u2Dpl != (uNewCS & X86_SEL_RPL))
4564 {
4565        Log(("iemTaskSwitch: non-conforming CS DPL RPL mismatch. uNewCS=%#x u4Type=%#x DPL=%u -> #TS\n", uNewCS,
4566 DescCS.Legacy.Gen.u4Type, DescCS.Legacy.Gen.u2Dpl));
4567 return iemRaiseTaskSwitchFaultWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4568 }
4569
4570 /* Is it there? */
4571 if (!DescCS.Legacy.Gen.u1Present)
4572 {
4573 Log(("iemTaskSwitch: CS not present. uNewCS=%#x -> #NP\n", uNewCS));
4574 return iemRaiseSelectorNotPresentWithErr(pVCpu, uNewCS & X86_SEL_MASK_OFF_RPL);
4575 }
4576
4577 cbLimit = X86DESC_LIMIT_G(&DescCS.Legacy);
4578 u64Base = X86DESC_BASE(&DescCS.Legacy);
4579
4580 /* Set the accessed bit before committing the result into CS. */
4581 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
4582 {
4583 rcStrict = iemMemMarkSelDescAccessed(pVCpu, uNewCS);
4584 if (rcStrict != VINF_SUCCESS)
4585 return rcStrict;
4586 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
4587 }
4588
4589 /* Commit CS. */
4590 pVCpu->cpum.GstCtx.cs.Sel = uNewCS;
4591 pVCpu->cpum.GstCtx.cs.ValidSel = uNewCS;
4592 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
4593 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimit;
4594 pVCpu->cpum.GstCtx.cs.u64Base = u64Base;
4595 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
4596 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
4597 }
4598
4599 /** @todo Debug trap. */
4600 if (fIsNewTSS386 && fNewDebugTrap)
4601 Log(("iemTaskSwitch: Debug Trap set in new TSS. Not implemented!\n"));
4602
4603 /*
4604 * Construct the error code masks based on what caused this task switch.
4605 * See Intel Instruction reference for INT.
4606 */
4607 uint16_t uExt;
4608 if ( enmTaskSwitch == IEMTASKSWITCH_INT_XCPT
4609 && ( !(fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4610 || (fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)))
4611 {
4612 uExt = 1;
4613 }
4614 else
4615 uExt = 0;
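    /* Editor's note (not part of the original source): uExt supplies bit 0 (the
     * 'EXT' bit) of the error codes raised below.  It ends up set for hardware
     * interrupts, exceptions and ICEBP, and clear for INT n / INT3 / INTO and for
     * task switches initiated by JMP/CALL/IRET, matching the usual selector error
     * code layout (EXT, IDT, TI, selector index). */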
4616
4617 /*
4618 * Push any error code on to the new stack.
4619 */
4620 if (fFlags & IEM_XCPT_FLAGS_ERR)
4621 {
4622 Assert(enmTaskSwitch == IEMTASKSWITCH_INT_XCPT);
4623 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4624 uint8_t const cbStackFrame = fIsNewTSS386 ? 4 : 2;
4625
4626 /* Check that there is sufficient space on the stack. */
4627 /** @todo Factor out segment limit checking for normal/expand down segments
4628 * into a separate function. */
4629 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4630 {
4631 if ( pVCpu->cpum.GstCtx.esp - 1 > cbLimitSS
4632 || pVCpu->cpum.GstCtx.esp < cbStackFrame)
4633 {
4634 /** @todo Intel says \#SS(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4635 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #SS\n",
4636 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4637 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4638 }
4639 }
4640 else
4641 {
4642 if ( pVCpu->cpum.GstCtx.esp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
4643 || pVCpu->cpum.GstCtx.esp - cbStackFrame < cbLimitSS + UINT32_C(1))
4644 {
4645 Log(("iemTaskSwitch: SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #SS\n",
4646 pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, cbStackFrame));
4647 return iemRaiseStackSelectorNotPresentWithErr(pVCpu, uExt);
4648 }
4649 }
4650
4651
4652 if (fIsNewTSS386)
4653 rcStrict = iemMemStackPushU32(pVCpu, uErr);
4654 else
4655 rcStrict = iemMemStackPushU16(pVCpu, uErr);
4656 if (rcStrict != VINF_SUCCESS)
4657 {
4658 Log(("iemTaskSwitch: Can't push error code to new task's stack. %s-bit TSS. rc=%Rrc\n",
4659 fIsNewTSS386 ? "32" : "16", VBOXSTRICTRC_VAL(rcStrict)));
4660 return rcStrict;
4661 }
4662 }
4663
4664 /* Check the new EIP against the new CS limit. */
4665 if (pVCpu->cpum.GstCtx.eip > pVCpu->cpum.GstCtx.cs.u32Limit)
4666 {
4667        Log(("iemTaskSwitch: New EIP exceeds CS limit. uNewEIP=%#RX32 CS limit=%u -> #GP(0)\n",
4668 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.cs.u32Limit));
4669 /** @todo Intel says \#GP(EXT) for INT/XCPT, I couldn't figure out AMD yet. */
4670 return iemRaiseGeneralProtectionFault(pVCpu, uExt);
4671 }
4672
4673 Log(("iemTaskSwitch: Success! New CS:EIP=%#04x:%#x SS=%#04x\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip,
4674 pVCpu->cpum.GstCtx.ss.Sel));
4675 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
4676}
4677
4678
4679/**
4680 * Implements exceptions and interrupts for protected mode.
4681 *
4682 * @returns VBox strict status code.
4683 * @param pVCpu The cross context virtual CPU structure of the calling thread.
4684 * @param cbInstr The number of bytes to offset rIP by in the return
4685 * address.
4686 * @param u8Vector The interrupt / exception vector number.
4687 * @param fFlags The flags.
4688 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
4689 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
4690 */
4691IEM_STATIC VBOXSTRICTRC
4692iemRaiseXcptOrIntInProtMode(PVMCPUCC pVCpu,
4693 uint8_t cbInstr,
4694 uint8_t u8Vector,
4695 uint32_t fFlags,
4696 uint16_t uErr,
4697 uint64_t uCr2)
4698{
4699 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
4700
4701 /*
4702 * Read the IDT entry.
4703 */
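    /* Editor's note (not part of the original source): protected-mode IDT entries
     * are 8 bytes each, hence the 'vector * 8' scaling and the '+ 7' last-byte
     * bound check below; the long-mode variant further down works with 16-byte
     * entries instead. */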
4704 if (pVCpu->cpum.GstCtx.idtr.cbIdt < UINT32_C(8) * u8Vector + 7)
4705 {
4706 Log(("RaiseXcptOrIntInProtMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
4707 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4708 }
4709 X86DESC Idte;
4710 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.u, UINT8_MAX,
4711 pVCpu->cpum.GstCtx.idtr.pIdt + UINT32_C(8) * u8Vector);
4712 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
4713 {
4714 Log(("iemRaiseXcptOrIntInProtMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
4715 return rcStrict;
4716 }
4717 Log(("iemRaiseXcptOrIntInProtMode: vec=%#x P=%u DPL=%u DT=%u:%u A=%u %04x:%04x%04x\n",
4718 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
4719 Idte.Gate.u5ParmCount, Idte.Gate.u16Sel, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
4720
4721 /*
4722 * Check the descriptor type, DPL and such.
4723 * ASSUMES this is done in the same order as described for call-gate calls.
4724 */
4725 if (Idte.Gate.u1DescType)
4726 {
4727 Log(("RaiseXcptOrIntInProtMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4728 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4729 }
4730 bool fTaskGate = false;
4731 uint8_t f32BitGate = true;
4732 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
4733 switch (Idte.Gate.u4Type)
4734 {
4735 case X86_SEL_TYPE_SYS_UNDEFINED:
4736 case X86_SEL_TYPE_SYS_286_TSS_AVAIL:
4737 case X86_SEL_TYPE_SYS_LDT:
4738 case X86_SEL_TYPE_SYS_286_TSS_BUSY:
4739 case X86_SEL_TYPE_SYS_286_CALL_GATE:
4740 case X86_SEL_TYPE_SYS_UNDEFINED2:
4741 case X86_SEL_TYPE_SYS_386_TSS_AVAIL:
4742 case X86_SEL_TYPE_SYS_UNDEFINED3:
4743 case X86_SEL_TYPE_SYS_386_TSS_BUSY:
4744 case X86_SEL_TYPE_SYS_386_CALL_GATE:
4745 case X86_SEL_TYPE_SYS_UNDEFINED4:
4746 {
4747 /** @todo check what actually happens when the type is wrong...
4748 * esp. call gates. */
4749 Log(("RaiseXcptOrIntInProtMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
4750 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4751 }
4752
4753 case X86_SEL_TYPE_SYS_286_INT_GATE:
4754 f32BitGate = false;
4755 RT_FALL_THRU();
4756 case X86_SEL_TYPE_SYS_386_INT_GATE:
4757 fEflToClear |= X86_EFL_IF;
4758 break;
4759
4760 case X86_SEL_TYPE_SYS_TASK_GATE:
4761 fTaskGate = true;
4762#ifndef IEM_IMPLEMENTS_TASKSWITCH
4763 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Task gates\n"));
4764#endif
4765 break;
4766
4767 case X86_SEL_TYPE_SYS_286_TRAP_GATE:
4768            f32BitGate = false;
                RT_FALL_THRU();
4769 case X86_SEL_TYPE_SYS_386_TRAP_GATE:
4770 break;
4771
4772 IEM_NOT_REACHED_DEFAULT_CASE_RET();
4773 }
4774
4775 /* Check DPL against CPL if applicable. */
4776 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
4777 {
4778 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
4779 {
4780 Log(("RaiseXcptOrIntInProtMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
4781 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4782 }
4783 }
4784
4785 /* Is it there? */
4786 if (!Idte.Gate.u1Present)
4787 {
4788 Log(("RaiseXcptOrIntInProtMode %#x - not present -> #NP\n", u8Vector));
4789 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
4790 }
4791
4792 /* Is it a task-gate? */
4793 if (fTaskGate)
4794 {
4795 /*
4796 * Construct the error code masks based on what caused this task switch.
4797 * See Intel Instruction reference for INT.
4798 */
4799 uint16_t const uExt = ( (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT)
4800 && !(fFlags & IEM_XCPT_FLAGS_ICEBP_INSTR)) ? 0 : 1;
4801 uint16_t const uSelMask = X86_SEL_MASK_OFF_RPL;
4802 RTSEL SelTSS = Idte.Gate.u16Sel;
4803
4804 /*
4805 * Fetch the TSS descriptor in the GDT.
4806 */
4807 IEMSELDESC DescTSS;
4808 rcStrict = iemMemFetchSelDescWithErr(pVCpu, &DescTSS, SelTSS, X86_XCPT_GP, (SelTSS & uSelMask) | uExt);
4809 if (rcStrict != VINF_SUCCESS)
4810 {
4811 Log(("RaiseXcptOrIntInProtMode %#x - failed to fetch TSS selector %#x, rc=%Rrc\n", u8Vector, SelTSS,
4812 VBOXSTRICTRC_VAL(rcStrict)));
4813 return rcStrict;
4814 }
4815
4816 /* The TSS descriptor must be a system segment and be available (not busy). */
4817 if ( DescTSS.Legacy.Gen.u1DescType
4818 || ( DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_286_TSS_AVAIL
4819 && DescTSS.Legacy.Gen.u4Type != X86_SEL_TYPE_SYS_386_TSS_AVAIL))
4820 {
4821 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x of task gate not a system descriptor or not available %#RX64\n",
4822 u8Vector, SelTSS, DescTSS.Legacy.au64));
4823 return iemRaiseGeneralProtectionFault(pVCpu, (SelTSS & uSelMask) | uExt);
4824 }
4825
4826 /* The TSS must be present. */
4827 if (!DescTSS.Legacy.Gen.u1Present)
4828 {
4829 Log(("RaiseXcptOrIntInProtMode %#x - TSS selector %#x not present %#RX64\n", u8Vector, SelTSS, DescTSS.Legacy.au64));
4830 return iemRaiseSelectorNotPresentWithErr(pVCpu, (SelTSS & uSelMask) | uExt);
4831 }
4832
4833 /* Do the actual task switch. */
4834 return iemTaskSwitch(pVCpu, IEMTASKSWITCH_INT_XCPT,
4835 (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip,
4836 fFlags, uErr, uCr2, SelTSS, &DescTSS);
4837 }
4838
4839 /* A null CS is bad. */
4840 RTSEL NewCS = Idte.Gate.u16Sel;
4841 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
4842 {
4843 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
4844 return iemRaiseGeneralProtectionFault0(pVCpu);
4845 }
4846
4847 /* Fetch the descriptor for the new CS. */
4848 IEMSELDESC DescCS;
4849 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP); /** @todo correct exception? */
4850 if (rcStrict != VINF_SUCCESS)
4851 {
4852 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
4853 return rcStrict;
4854 }
4855
4856 /* Must be a code segment. */
4857 if (!DescCS.Legacy.Gen.u1DescType)
4858 {
4859 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4860 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4861 }
4862 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CODE))
4863 {
4864 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - data selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
4865 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4866 }
4867
4868 /* Don't allow lowering the privilege level. */
4869 /** @todo Does the lowering of privileges apply to software interrupts
4870     *        only? This has a bearing on the more-privileged or
4871 * same-privilege stack behavior further down. A testcase would
4872 * be nice. */
4873 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
4874 {
4875 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
4876 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
4877 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
4878 }
4879
4880 /* Make sure the selector is present. */
4881 if (!DescCS.Legacy.Gen.u1Present)
4882 {
4883 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
4884 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
4885 }
4886
4887 /* Check the new EIP against the new CS limit. */
4888 uint32_t const uNewEip = Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_INT_GATE
4889 || Idte.Gate.u4Type == X86_SEL_TYPE_SYS_286_TRAP_GATE
4890 ? Idte.Gate.u16OffsetLow
4891 : Idte.Gate.u16OffsetLow | ((uint32_t)Idte.Gate.u16OffsetHigh << 16);
4892 uint32_t cbLimitCS = X86DESC_LIMIT_G(&DescCS.Legacy);
4893 if (uNewEip > cbLimitCS)
4894 {
4895 Log(("RaiseXcptOrIntInProtMode %#x - EIP=%#x > cbLimitCS=%#x (CS=%#x) -> #GP(0)\n",
4896 u8Vector, uNewEip, cbLimitCS, NewCS));
4897 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4898 }
4899 Log7(("iemRaiseXcptOrIntInProtMode: new EIP=%#x CS=%#x\n", uNewEip, NewCS));
4900
4901 /* Calc the flag image to push. */
4902 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
4903 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
4904 fEfl &= ~X86_EFL_RF;
4905 else
4906 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
4907
4908 /* From V8086 mode only go to CPL 0. */
4909 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
4910 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
4911 if ((fEfl & X86_EFL_VM) && uNewCpl != 0) /** @todo When exactly is this raised? */
4912 {
4913 Log(("RaiseXcptOrIntInProtMode %#x - CS=%#x - New CPL (%d) != 0 w/ VM=1 -> #GP\n", u8Vector, NewCS, uNewCpl));
4914 return iemRaiseGeneralProtectionFault(pVCpu, 0);
4915 }
4916
4917 /*
4918 * If the privilege level changes, we need to get a new stack from the TSS.
4919 * This in turns means validating the new SS and ESP...
4920 */
4921 if (uNewCpl != pVCpu->iem.s.uCpl)
4922 {
4923 RTSEL NewSS;
4924 uint32_t uNewEsp;
4925 rcStrict = iemRaiseLoadStackFromTss32Or16(pVCpu, uNewCpl, &NewSS, &uNewEsp);
4926 if (rcStrict != VINF_SUCCESS)
4927 return rcStrict;
4928
4929 IEMSELDESC DescSS;
4930 rcStrict = iemMiscValidateNewSS(pVCpu, NewSS, uNewCpl, &DescSS);
4931 if (rcStrict != VINF_SUCCESS)
4932 return rcStrict;
4933 /* If the new SS is 16-bit, we are only going to use SP, not ESP. */
4934 if (!DescSS.Legacy.Gen.u1DefBig)
4935 {
4936 Log(("iemRaiseXcptOrIntInProtMode: Forcing ESP=%#x to 16 bits\n", uNewEsp));
4937 uNewEsp = (uint16_t)uNewEsp;
4938 }
4939
4940 Log7(("iemRaiseXcptOrIntInProtMode: New SS=%#x ESP=%#x (from TSS); current SS=%#x ESP=%#x\n", NewSS, uNewEsp, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4941
4942 /* Check that there is sufficient space for the stack frame. */
4943 uint32_t cbLimitSS = X86DESC_LIMIT_G(&DescSS.Legacy);
4944 uint8_t const cbStackFrame = !(fEfl & X86_EFL_VM)
4945 ? (fFlags & IEM_XCPT_FLAGS_ERR ? 12 : 10) << f32BitGate
4946 : (fFlags & IEM_XCPT_FLAGS_ERR ? 20 : 18) << f32BitGate;
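        /* Editor's note (not part of the original source): counting the entries
         * stored below, the inner-level frame is EIP, CS, EFLAGS, ESP and SS plus
         * an optional error code (5-6 entries of 2 or 4 bytes depending on the
         * gate size); when interrupting V8086 code the four data selectors
         * ES/DS/FS/GS are pushed as well, giving the 9-10 entries reflected in
         * the sizes computed above. */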
4947
4948 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_DOWN))
4949 {
4950 if ( uNewEsp - 1 > cbLimitSS
4951 || uNewEsp < cbStackFrame)
4952 {
4953 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x is out of bounds -> #GP\n",
4954 u8Vector, NewSS, uNewEsp, cbStackFrame));
4955 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4956 }
4957 }
4958 else
4959 {
4960 if ( uNewEsp - 1 > (DescSS.Legacy.Gen.u1DefBig ? UINT32_MAX : UINT16_MAX)
4961 || uNewEsp - cbStackFrame < cbLimitSS + UINT32_C(1))
4962 {
4963 Log(("RaiseXcptOrIntInProtMode: %#x - SS=%#x ESP=%#x cbStackFrame=%#x (expand down) is out of bounds -> #GP\n",
4964 u8Vector, NewSS, uNewEsp, cbStackFrame));
4965 return iemRaiseSelectorBoundsBySelector(pVCpu, NewSS);
4966 }
4967 }
4968
4969 /*
4970 * Start making changes.
4971 */
4972
4973 /* Set the new CPL so that stack accesses use it. */
4974 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
4975 pVCpu->iem.s.uCpl = uNewCpl;
4976
4977 /* Create the stack frame. */
4978 RTPTRUNION uStackFrame;
4979 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
4980 uNewEsp - cbStackFrame + X86DESC_BASE(&DescSS.Legacy), IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
4981 if (rcStrict != VINF_SUCCESS)
4982 return rcStrict;
4983 void * const pvStackFrame = uStackFrame.pv;
4984 if (f32BitGate)
4985 {
4986 if (fFlags & IEM_XCPT_FLAGS_ERR)
4987 *uStackFrame.pu32++ = uErr;
4988 uStackFrame.pu32[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
4989 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
4990 uStackFrame.pu32[2] = fEfl;
4991 uStackFrame.pu32[3] = pVCpu->cpum.GstCtx.esp;
4992 uStackFrame.pu32[4] = pVCpu->cpum.GstCtx.ss.Sel;
4993 Log7(("iemRaiseXcptOrIntInProtMode: 32-bit push SS=%#x ESP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp));
4994 if (fEfl & X86_EFL_VM)
4995 {
4996 uStackFrame.pu32[1] = pVCpu->cpum.GstCtx.cs.Sel;
4997 uStackFrame.pu32[5] = pVCpu->cpum.GstCtx.es.Sel;
4998 uStackFrame.pu32[6] = pVCpu->cpum.GstCtx.ds.Sel;
4999 uStackFrame.pu32[7] = pVCpu->cpum.GstCtx.fs.Sel;
5000 uStackFrame.pu32[8] = pVCpu->cpum.GstCtx.gs.Sel;
5001 }
5002 }
5003 else
5004 {
5005 if (fFlags & IEM_XCPT_FLAGS_ERR)
5006 *uStackFrame.pu16++ = uErr;
5007 uStackFrame.pu16[0] = (fFlags & IEM_XCPT_FLAGS_T_SOFT_INT) ? pVCpu->cpum.GstCtx.ip + cbInstr : pVCpu->cpum.GstCtx.ip;
5008 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl;
5009 uStackFrame.pu16[2] = fEfl;
5010 uStackFrame.pu16[3] = pVCpu->cpum.GstCtx.sp;
5011 uStackFrame.pu16[4] = pVCpu->cpum.GstCtx.ss.Sel;
5012 Log7(("iemRaiseXcptOrIntInProtMode: 16-bit push SS=%#x SP=%#x\n", pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.sp));
5013 if (fEfl & X86_EFL_VM)
5014 {
5015 uStackFrame.pu16[1] = pVCpu->cpum.GstCtx.cs.Sel;
5016 uStackFrame.pu16[5] = pVCpu->cpum.GstCtx.es.Sel;
5017 uStackFrame.pu16[6] = pVCpu->cpum.GstCtx.ds.Sel;
5018 uStackFrame.pu16[7] = pVCpu->cpum.GstCtx.fs.Sel;
5019 uStackFrame.pu16[8] = pVCpu->cpum.GstCtx.gs.Sel;
5020 }
5021 }
5022 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5023 if (rcStrict != VINF_SUCCESS)
5024 return rcStrict;
5025
5026 /* Mark the selectors 'accessed' (hope this is the correct time). */
5027        /** @todo testcase: exactly _when_ are the accessed bits set - before or
5028 * after pushing the stack frame? (Write protect the gdt + stack to
5029 * find out.) */
5030 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5031 {
5032 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5033 if (rcStrict != VINF_SUCCESS)
5034 return rcStrict;
5035 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5036 }
5037
5038 if (!(DescSS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5039 {
5040 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewSS);
5041 if (rcStrict != VINF_SUCCESS)
5042 return rcStrict;
5043 DescSS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5044 }
5045
5046 /*
5047         * Start committing the register changes (joins with the DPL=CPL branch).
5048 */
5049 pVCpu->cpum.GstCtx.ss.Sel = NewSS;
5050 pVCpu->cpum.GstCtx.ss.ValidSel = NewSS;
5051 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5052 pVCpu->cpum.GstCtx.ss.u32Limit = cbLimitSS;
5053 pVCpu->cpum.GstCtx.ss.u64Base = X86DESC_BASE(&DescSS.Legacy);
5054 pVCpu->cpum.GstCtx.ss.Attr.u = X86DESC_GET_HID_ATTR(&DescSS.Legacy);
5055 /** @todo When coming from 32-bit code and operating with a 16-bit TSS and
5056 * 16-bit handler, the high word of ESP remains unchanged (i.e. only
5057 * SP is loaded).
5058 * Need to check the other combinations too:
5059 * - 16-bit TSS, 32-bit handler
5060 * - 32-bit TSS, 16-bit handler */
5061 if (!pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
5062 pVCpu->cpum.GstCtx.sp = (uint16_t)(uNewEsp - cbStackFrame);
5063 else
5064 pVCpu->cpum.GstCtx.rsp = uNewEsp - cbStackFrame;
5065
5066 if (fEfl & X86_EFL_VM)
5067 {
5068 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.gs);
5069 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.fs);
5070 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.es);
5071 iemHlpLoadNullDataSelectorOnV86Xcpt(pVCpu, &pVCpu->cpum.GstCtx.ds);
5072 }
5073 }
5074 /*
5075 * Same privilege, no stack change and smaller stack frame.
5076 */
5077 else
5078 {
5079 uint64_t uNewRsp;
5080 RTPTRUNION uStackFrame;
5081 uint8_t const cbStackFrame = (fFlags & IEM_XCPT_FLAGS_ERR ? 8 : 6) << f32BitGate;
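        /* Editor's note (not part of the original source): with no stack switch
         * the frame is just EIP/IP, CS and EFLAGS plus an optional error code,
         * i.e. 3-4 entries of 2 or 4 bytes, which is what the size above works
         * out to. */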
5082 rcStrict = iemMemStackPushBeginSpecial(pVCpu, cbStackFrame, &uStackFrame.pv, &uNewRsp);
5083 if (rcStrict != VINF_SUCCESS)
5084 return rcStrict;
5085 void * const pvStackFrame = uStackFrame.pv;
5086
5087 if (f32BitGate)
5088 {
5089 if (fFlags & IEM_XCPT_FLAGS_ERR)
5090 *uStackFrame.pu32++ = uErr;
5091 uStackFrame.pu32[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5092 uStackFrame.pu32[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5093 uStackFrame.pu32[2] = fEfl;
5094 }
5095 else
5096 {
5097 if (fFlags & IEM_XCPT_FLAGS_ERR)
5098 *uStackFrame.pu16++ = uErr;
5099 uStackFrame.pu16[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.eip + cbInstr : pVCpu->cpum.GstCtx.eip;
5100 uStackFrame.pu16[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | pVCpu->iem.s.uCpl;
5101 uStackFrame.pu16[2] = fEfl;
5102 }
5103 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W); /* don't use the commit here */
5104 if (rcStrict != VINF_SUCCESS)
5105 return rcStrict;
5106
5107 /* Mark the CS selector as 'accessed'. */
5108 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5109 {
5110 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5111 if (rcStrict != VINF_SUCCESS)
5112 return rcStrict;
5113 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5114 }
5115
5116 /*
5117 * Start committing the register changes (joins with the other branch).
5118 */
5119 pVCpu->cpum.GstCtx.rsp = uNewRsp;
5120 }
5121
5122 /* ... register committing continues. */
5123 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5124 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5125 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5126 pVCpu->cpum.GstCtx.cs.u32Limit = cbLimitCS;
5127 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5128 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5129
5130 pVCpu->cpum.GstCtx.rip = uNewEip; /* (The entire register is modified, see pe16_32 bs3kit tests.) */
5131 fEfl &= ~fEflToClear;
5132 IEMMISC_SET_EFL(pVCpu, fEfl);
5133
5134 if (fFlags & IEM_XCPT_FLAGS_CR2)
5135 pVCpu->cpum.GstCtx.cr2 = uCr2;
5136
5137 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5138 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5139
5140 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5141}
5142
5143
5144/**
5145 * Implements exceptions and interrupts for long mode.
5146 *
5147 * @returns VBox strict status code.
5148 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5149 * @param cbInstr The number of bytes to offset rIP by in the return
5150 * address.
5151 * @param u8Vector The interrupt / exception vector number.
5152 * @param fFlags The flags.
5153 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5154 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5155 */
5156IEM_STATIC VBOXSTRICTRC
5157iemRaiseXcptOrIntInLongMode(PVMCPUCC pVCpu,
5158 uint8_t cbInstr,
5159 uint8_t u8Vector,
5160 uint32_t fFlags,
5161 uint16_t uErr,
5162 uint64_t uCr2)
5163{
5164 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5165
5166 /*
5167 * Read the IDT entry.
5168 */
5169 uint16_t offIdt = (uint16_t)u8Vector << 4;
5170 if (pVCpu->cpum.GstCtx.idtr.cbIdt < offIdt + 7)
5171 {
5172 Log(("iemRaiseXcptOrIntInLongMode: %#x is out of bounds (%#x)\n", u8Vector, pVCpu->cpum.GstCtx.idtr.cbIdt));
5173 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5174 }
5175 X86DESC64 Idte;
5176#ifdef _MSC_VER /* Shut up silly compiler warning. */
5177 Idte.au64[0] = 0;
5178 Idte.au64[1] = 0;
5179#endif
5180 VBOXSTRICTRC rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[0], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt);
5181 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
5182 rcStrict = iemMemFetchSysU64(pVCpu, &Idte.au64[1], UINT8_MAX, pVCpu->cpum.GstCtx.idtr.pIdt + offIdt + 8);
5183 if (RT_UNLIKELY(rcStrict != VINF_SUCCESS))
5184 {
5185 Log(("iemRaiseXcptOrIntInLongMode: failed to fetch IDT entry! vec=%#x rc=%Rrc\n", u8Vector, VBOXSTRICTRC_VAL(rcStrict)));
5186 return rcStrict;
5187 }
5188 Log(("iemRaiseXcptOrIntInLongMode: vec=%#x P=%u DPL=%u DT=%u:%u IST=%u %04x:%08x%04x%04x\n",
5189 u8Vector, Idte.Gate.u1Present, Idte.Gate.u2Dpl, Idte.Gate.u1DescType, Idte.Gate.u4Type,
5190 Idte.Gate.u3IST, Idte.Gate.u16Sel, Idte.Gate.u32OffsetTop, Idte.Gate.u16OffsetHigh, Idte.Gate.u16OffsetLow));
5191
5192 /*
5193 * Check the descriptor type, DPL and such.
5194 * ASSUMES this is done in the same order as described for call-gate calls.
5195 */
5196 if (Idte.Gate.u1DescType)
5197 {
5198 Log(("iemRaiseXcptOrIntInLongMode %#x - not system selector (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5199 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5200 }
5201 uint32_t fEflToClear = X86_EFL_TF | X86_EFL_NT | X86_EFL_RF | X86_EFL_VM;
5202 switch (Idte.Gate.u4Type)
5203 {
5204 case AMD64_SEL_TYPE_SYS_INT_GATE:
5205 fEflToClear |= X86_EFL_IF;
5206 break;
5207 case AMD64_SEL_TYPE_SYS_TRAP_GATE:
5208 break;
5209
5210 default:
5211 Log(("iemRaiseXcptOrIntInLongMode %#x - invalid type (%#x) -> #GP\n", u8Vector, Idte.Gate.u4Type));
5212 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5213 }
5214
5215 /* Check DPL against CPL if applicable. */
5216 if ((fFlags & (IEM_XCPT_FLAGS_T_SOFT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT)
5217 {
5218 if (pVCpu->iem.s.uCpl > Idte.Gate.u2Dpl)
5219 {
5220 Log(("iemRaiseXcptOrIntInLongMode %#x - CPL (%d) > DPL (%d) -> #GP\n", u8Vector, pVCpu->iem.s.uCpl, Idte.Gate.u2Dpl));
5221 return iemRaiseGeneralProtectionFault(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5222 }
5223 }
5224
5225 /* Is it there? */
5226 if (!Idte.Gate.u1Present)
5227 {
5228 Log(("iemRaiseXcptOrIntInLongMode %#x - not present -> #NP\n", u8Vector));
5229 return iemRaiseSelectorNotPresentWithErr(pVCpu, X86_TRAP_ERR_IDT | ((uint16_t)u8Vector << X86_TRAP_ERR_SEL_SHIFT));
5230 }
5231
5232 /* A null CS is bad. */
5233 RTSEL NewCS = Idte.Gate.u16Sel;
5234 if (!(NewCS & X86_SEL_MASK_OFF_RPL))
5235 {
5236 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x -> #GP\n", u8Vector, NewCS));
5237 return iemRaiseGeneralProtectionFault0(pVCpu);
5238 }
5239
5240 /* Fetch the descriptor for the new CS. */
5241 IEMSELDESC DescCS;
5242 rcStrict = iemMemFetchSelDesc(pVCpu, &DescCS, NewCS, X86_XCPT_GP);
5243 if (rcStrict != VINF_SUCCESS)
5244 {
5245 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - rc=%Rrc\n", u8Vector, NewCS, VBOXSTRICTRC_VAL(rcStrict)));
5246 return rcStrict;
5247 }
5248
5249 /* Must be a 64-bit code segment. */
5250 if (!DescCS.Long.Gen.u1DescType)
5251 {
5252 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - system selector (%#x) -> #GP\n", u8Vector, NewCS, DescCS.Legacy.Gen.u4Type));
5253 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5254 }
5255 if ( !DescCS.Long.Gen.u1Long
5256 || DescCS.Long.Gen.u1DefBig
5257 || !(DescCS.Long.Gen.u4Type & X86_SEL_TYPE_CODE) )
5258 {
5259 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - not 64-bit code selector (%#x, L=%u, D=%u) -> #GP\n",
5260 u8Vector, NewCS, DescCS.Legacy.Gen.u4Type, DescCS.Long.Gen.u1Long, DescCS.Long.Gen.u1DefBig));
5261 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5262 }
5263
5264 /* Don't allow lowering the privilege level. For non-conforming CS
5265 selectors, the CS.DPL sets the privilege level the trap/interrupt
5266 handler runs at. For conforming CS selectors, the CPL remains
5267 unchanged, but the CS.DPL must be <= CPL. */
5268 /** @todo Testcase: Interrupt handler with CS.DPL=1, interrupt dispatched
5269 * when CPU in Ring-0. Result \#GP? */
5270 if (DescCS.Legacy.Gen.u2Dpl > pVCpu->iem.s.uCpl)
5271 {
5272 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - DPL (%d) > CPL (%d) -> #GP\n",
5273 u8Vector, NewCS, DescCS.Legacy.Gen.u2Dpl, pVCpu->iem.s.uCpl));
5274 return iemRaiseGeneralProtectionFault(pVCpu, NewCS & X86_SEL_MASK_OFF_RPL);
5275 }
5276
5277
5278 /* Make sure the selector is present. */
5279 if (!DescCS.Legacy.Gen.u1Present)
5280 {
5281 Log(("iemRaiseXcptOrIntInLongMode %#x - CS=%#x - segment not present -> #NP\n", u8Vector, NewCS));
5282 return iemRaiseSelectorNotPresentBySelector(pVCpu, NewCS);
5283 }
5284
5285 /* Check that the new RIP is canonical. */
5286 uint64_t const uNewRip = Idte.Gate.u16OffsetLow
5287 | ((uint32_t)Idte.Gate.u16OffsetHigh << 16)
5288 | ((uint64_t)Idte.Gate.u32OffsetTop << 32);
5289 if (!IEM_IS_CANONICAL(uNewRip))
5290 {
5291 Log(("iemRaiseXcptOrIntInLongMode %#x - RIP=%#RX64 - Not canonical -> #GP(0)\n", u8Vector, uNewRip));
5292 return iemRaiseGeneralProtectionFault0(pVCpu);
5293 }
5294
5295 /*
5296 * If the privilege level changes or if the IST isn't zero, we need to get
5297 * a new stack from the TSS.
5298 */
5299 uint64_t uNewRsp;
5300 uint8_t const uNewCpl = DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_CONF
5301 ? pVCpu->iem.s.uCpl : DescCS.Legacy.Gen.u2Dpl;
5302 if ( uNewCpl != pVCpu->iem.s.uCpl
5303 || Idte.Gate.u3IST != 0)
5304 {
5305 rcStrict = iemRaiseLoadStackFromTss64(pVCpu, uNewCpl, Idte.Gate.u3IST, &uNewRsp);
5306 if (rcStrict != VINF_SUCCESS)
5307 return rcStrict;
5308 }
5309 else
5310 uNewRsp = pVCpu->cpum.GstCtx.rsp;
5311 uNewRsp &= ~(uint64_t)0xf;
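    /* Editor's note (not part of the original source): in 64-bit mode the CPU
     * aligns the new stack pointer on a 16-byte boundary before pushing the
     * interrupt frame, which is what the masking above emulates; the frame
     * itself is always five 8-byte pushes (SS, RSP, RFLAGS, CS, RIP) plus an
     * optional error code. */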
5312
5313 /*
5314 * Calc the flag image to push.
5315 */
5316 uint32_t fEfl = IEMMISC_GET_EFL(pVCpu);
5317 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT))
5318 fEfl &= ~X86_EFL_RF;
5319 else
5320 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */
5321
5322 /*
5323 * Start making changes.
5324 */
5325 /* Set the new CPL so that stack accesses use it. */
5326 uint8_t const uOldCpl = pVCpu->iem.s.uCpl;
5327 pVCpu->iem.s.uCpl = uNewCpl;
5328
5329 /* Create the stack frame. */
5330 uint32_t cbStackFrame = sizeof(uint64_t) * (5 + !!(fFlags & IEM_XCPT_FLAGS_ERR));
5331 RTPTRUNION uStackFrame;
5332 rcStrict = iemMemMap(pVCpu, &uStackFrame.pv, cbStackFrame, UINT8_MAX,
5333 uNewRsp - cbStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS); /* _SYS is a hack ... */
5334 if (rcStrict != VINF_SUCCESS)
5335 return rcStrict;
5336 void * const pvStackFrame = uStackFrame.pv;
5337
5338 if (fFlags & IEM_XCPT_FLAGS_ERR)
5339 *uStackFrame.pu64++ = uErr;
5340 uStackFrame.pu64[0] = fFlags & IEM_XCPT_FLAGS_T_SOFT_INT ? pVCpu->cpum.GstCtx.rip + cbInstr : pVCpu->cpum.GstCtx.rip;
5341 uStackFrame.pu64[1] = (pVCpu->cpum.GstCtx.cs.Sel & ~X86_SEL_RPL) | uOldCpl; /* CPL paranoia */
5342 uStackFrame.pu64[2] = fEfl;
5343 uStackFrame.pu64[3] = pVCpu->cpum.GstCtx.rsp;
5344 uStackFrame.pu64[4] = pVCpu->cpum.GstCtx.ss.Sel;
5345 rcStrict = iemMemCommitAndUnmap(pVCpu, pvStackFrame, IEM_ACCESS_STACK_W | IEM_ACCESS_WHAT_SYS);
5346 if (rcStrict != VINF_SUCCESS)
5347 return rcStrict;
5348
5349    /* Mark the CS selector 'accessed' (hope this is the correct time). */
5350    /** @todo testcase: exactly _when_ are the accessed bits set - before or
5351 * after pushing the stack frame? (Write protect the gdt + stack to
5352 * find out.) */
5353 if (!(DescCS.Legacy.Gen.u4Type & X86_SEL_TYPE_ACCESSED))
5354 {
5355 rcStrict = iemMemMarkSelDescAccessed(pVCpu, NewCS);
5356 if (rcStrict != VINF_SUCCESS)
5357 return rcStrict;
5358 DescCS.Legacy.Gen.u4Type |= X86_SEL_TYPE_ACCESSED;
5359 }
5360
5361 /*
5362     * Start committing the register changes.
5363 */
5364 /** @todo research/testcase: Figure out what VT-x and AMD-V loads into the
5365 * hidden registers when interrupting 32-bit or 16-bit code! */
5366 if (uNewCpl != uOldCpl)
5367 {
5368 pVCpu->cpum.GstCtx.ss.Sel = 0 | uNewCpl;
5369 pVCpu->cpum.GstCtx.ss.ValidSel = 0 | uNewCpl;
5370 pVCpu->cpum.GstCtx.ss.fFlags = CPUMSELREG_FLAGS_VALID;
5371 pVCpu->cpum.GstCtx.ss.u32Limit = UINT32_MAX;
5372 pVCpu->cpum.GstCtx.ss.u64Base = 0;
5373 pVCpu->cpum.GstCtx.ss.Attr.u = (uNewCpl << X86DESCATTR_DPL_SHIFT) | X86DESCATTR_UNUSABLE;
5374 }
5375 pVCpu->cpum.GstCtx.rsp = uNewRsp - cbStackFrame;
5376 pVCpu->cpum.GstCtx.cs.Sel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5377 pVCpu->cpum.GstCtx.cs.ValidSel = (NewCS & ~X86_SEL_RPL) | uNewCpl;
5378 pVCpu->cpum.GstCtx.cs.fFlags = CPUMSELREG_FLAGS_VALID;
5379 pVCpu->cpum.GstCtx.cs.u32Limit = X86DESC_LIMIT_G(&DescCS.Legacy);
5380 pVCpu->cpum.GstCtx.cs.u64Base = X86DESC_BASE(&DescCS.Legacy);
5381 pVCpu->cpum.GstCtx.cs.Attr.u = X86DESC_GET_HID_ATTR(&DescCS.Legacy);
5382 pVCpu->cpum.GstCtx.rip = uNewRip;
5383
5384 fEfl &= ~fEflToClear;
5385 IEMMISC_SET_EFL(pVCpu, fEfl);
5386
5387 if (fFlags & IEM_XCPT_FLAGS_CR2)
5388 pVCpu->cpum.GstCtx.cr2 = uCr2;
5389
5390 if (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT)
5391 iemRaiseXcptAdjustState(pVCpu, u8Vector);
5392
5393 return fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT ? VINF_IEM_RAISED_XCPT : VINF_SUCCESS;
5394}
5395
5396
5397/**
5398 * Implements exceptions and interrupts.
5399 *
5400 * All exceptions and interrupts go through this function!
5401 *
5402 * @returns VBox strict status code.
5403 * @param pVCpu The cross context virtual CPU structure of the calling thread.
5404 * @param cbInstr The number of bytes to offset rIP by in the return
5405 * address.
5406 * @param u8Vector The interrupt / exception vector number.
5407 * @param fFlags The flags.
5408 * @param uErr The error value if IEM_XCPT_FLAGS_ERR is set.
5409 * @param uCr2 The CR2 value if IEM_XCPT_FLAGS_CR2 is set.
5410 */
5411DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC)
5412iemRaiseXcptOrInt(PVMCPUCC pVCpu,
5413 uint8_t cbInstr,
5414 uint8_t u8Vector,
5415 uint32_t fFlags,
5416 uint16_t uErr,
5417 uint64_t uCr2)
5418{
5419 /*
5420 * Get all the state that we might need here.
5421 */
5422 IEM_CTX_IMPORT_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5423 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
5424
5425#ifndef IEM_WITH_CODE_TLB /** @todo we're doing it afterwards too, that should suffice... */
5426 /*
5427 * Flush prefetch buffer
5428 */
5429 pVCpu->iem.s.cbOpcode = pVCpu->iem.s.offOpcode;
5430#endif
5431
5432 /*
5433 * Perform the V8086 IOPL check and upgrade the fault without nesting.
5434 */
5435 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
5436 && pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL != 3
5437 && (fFlags & ( IEM_XCPT_FLAGS_T_SOFT_INT
5438 | IEM_XCPT_FLAGS_BP_INSTR
5439 | IEM_XCPT_FLAGS_ICEBP_INSTR
5440 | IEM_XCPT_FLAGS_OF_INSTR)) == IEM_XCPT_FLAGS_T_SOFT_INT
5441 && (pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) )
5442 {
5443 Log(("iemRaiseXcptOrInt: V8086 IOPL check failed for int %#x -> #GP(0)\n", u8Vector));
5444 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5445 u8Vector = X86_XCPT_GP;
5446 uErr = 0;
5447 }
5448#ifdef DBGFTRACE_ENABLED
5449 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "Xcpt/%u: %02x %u %x %x %llx %04x:%04llx %04x:%04llx",
5450 pVCpu->iem.s.cXcptRecursions, u8Vector, cbInstr, fFlags, uErr, uCr2,
5451 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp);
5452#endif
5453
5454 /*
5455 * Evaluate whether NMI blocking should be in effect.
5456 * Normally, NMI blocking is in effect whenever we inject an NMI.
5457 */
5458 bool fBlockNmi;
5459 if ( u8Vector == X86_XCPT_NMI
5460 && (fFlags & IEM_XCPT_FLAGS_T_CPU_XCPT))
5461 fBlockNmi = true;
5462 else
5463 fBlockNmi = false;
5464
5465#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5466 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5467 {
5468 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEvent(pVCpu, u8Vector, fFlags, uErr, uCr2, cbInstr);
5469 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5470 return rcStrict0;
5471
5472 /* If virtual-NMI blocking is in effect for the nested-guest, guest NMIs are not blocked. */
5473 if (pVCpu->cpum.GstCtx.hwvirt.vmx.fVirtNmiBlocking)
5474 {
5475 Assert(CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_VIRT_NMI));
5476 fBlockNmi = false;
5477 }
5478 }
5479#endif
5480
5481#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
5482 if (CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)))
5483 {
5484 /*
5485 * If the event is being injected as part of VMRUN, it isn't subject to event
5486 * intercepts in the nested-guest. However, secondary exceptions that occur
5487 * during injection of any event -are- subject to exception intercepts.
5488 *
5489 * See AMD spec. 15.20 "Event Injection".
5490 */
5491 if (!pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents)
5492 pVCpu->cpum.GstCtx.hwvirt.svm.fInterceptEvents = true;
5493 else
5494 {
5495 /*
5496 * Check and handle if the event being raised is intercepted.
5497 */
5498 VBOXSTRICTRC rcStrict0 = iemHandleSvmEventIntercept(pVCpu, u8Vector, fFlags, uErr, uCr2);
5499 if (rcStrict0 != VINF_SVM_INTERCEPT_NOT_ACTIVE)
5500 return rcStrict0;
5501 }
5502 }
5503#endif
5504
5505 /*
5506 * Set NMI blocking if necessary.
5507 */
5508 if ( fBlockNmi
5509 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
5510 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);
5511
5512 /*
5513 * Do recursion accounting.
5514 */
5515 uint8_t const uPrevXcpt = pVCpu->iem.s.uCurXcpt;
5516 uint32_t const fPrevXcpt = pVCpu->iem.s.fCurXcpt;
5517 if (pVCpu->iem.s.cXcptRecursions == 0)
5518 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx\n",
5519 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2));
5520 else
5521 {
5522 Log(("iemRaiseXcptOrInt: %#x at %04x:%RGv cbInstr=%#x fFlags=%#x uErr=%#x uCr2=%llx; prev=%#x depth=%d flags=%#x\n",
5523 u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, cbInstr, fFlags, uErr, uCr2, pVCpu->iem.s.uCurXcpt,
5524 pVCpu->iem.s.cXcptRecursions + 1, fPrevXcpt));
5525
5526 if (pVCpu->iem.s.cXcptRecursions >= 4)
5527 {
5528#ifdef DEBUG_bird
5529 AssertFailed();
5530#endif
5531 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Too many fault nestings.\n"));
5532 }
5533
5534 /*
5535 * Evaluate the sequence of recurring events.
5536 */
5537 IEMXCPTRAISE enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fPrevXcpt, uPrevXcpt, fFlags, u8Vector,
5538 NULL /* pXcptRaiseInfo */);
5539 if (enmRaise == IEMXCPTRAISE_CURRENT_XCPT)
5540 { /* likely */ }
5541 else if (enmRaise == IEMXCPTRAISE_DOUBLE_FAULT)
5542 {
5543 Log2(("iemRaiseXcptOrInt: Raising double fault. uPrevXcpt=%#x\n", uPrevXcpt));
5544 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR;
5545 u8Vector = X86_XCPT_DF;
5546 uErr = 0;
5547#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
5548 /* VMX nested-guest #DF intercept needs to be checked here. */
5549 if (IEM_VMX_IS_NON_ROOT_MODE(pVCpu))
5550 {
5551 VBOXSTRICTRC rcStrict0 = iemVmxVmexitEventDoubleFault(pVCpu);
5552 if (rcStrict0 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
5553 return rcStrict0;
5554 }
5555#endif
5556 /* SVM nested-guest #DF intercepts need to be checked now. See AMD spec. 15.12 "Exception Intercepts". */
5557 if (IEM_SVM_IS_XCPT_INTERCEPT_SET(pVCpu, X86_XCPT_DF))
5558 IEM_SVM_VMEXIT_RET(pVCpu, SVM_EXIT_XCPT_DF, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */);
5559 }
5560 else if (enmRaise == IEMXCPTRAISE_TRIPLE_FAULT)
5561 {
5562 Log2(("iemRaiseXcptOrInt: Raising triple fault. uPrevXcpt=%#x\n", uPrevXcpt));
5563 return iemInitiateCpuShutdown(pVCpu);
5564 }
5565 else if (enmRaise == IEMXCPTRAISE_CPU_HANG)
5566 {
5567 /* If a nested-guest enters an endless CPU loop condition, we'll emulate it; otherwise guru. */
5568 Log2(("iemRaiseXcptOrInt: CPU hang condition detected\n"));
5569 if ( !CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu))
5570 && !CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
5571 return VERR_EM_GUEST_CPU_HANG;
5572 }
5573 else
5574 {
5575 AssertMsgFailed(("Unexpected condition! enmRaise=%#x uPrevXcpt=%#x fPrevXcpt=%#x, u8Vector=%#x fFlags=%#x\n",
5576 enmRaise, uPrevXcpt, fPrevXcpt, u8Vector, fFlags));
5577 return VERR_IEM_IPE_9;
5578 }
5579
5580 /*
5581         * The 'EXT' bit is set when an exception occurs during delivery of an external
5582         * event (such as an interrupt or an earlier exception)[1]. A privileged software
5583         * exception (INT1) also sets the EXT bit[2]. For exceptions generated by software
5584         * interrupts and the INTO and INT3 instructions, the 'EXT' bit is not set.
5585 *
5586 * [1] - Intel spec. 6.13 "Error Code"
5587 * [2] - Intel spec. 26.5.1.1 "Details of Vectored-Event Injection".
5588 * [3] - Intel Instruction reference for INT n.
5589 */
5590 if ( (fPrevXcpt & (IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_T_EXT_INT | IEM_XCPT_FLAGS_ICEBP_INSTR))
5591 && (fFlags & IEM_XCPT_FLAGS_ERR)
5592 && u8Vector != X86_XCPT_PF
5593 && u8Vector != X86_XCPT_DF)
5594 {
5595 uErr |= X86_TRAP_ERR_EXTERNAL;
5596 }
5597 }
5598
5599 pVCpu->iem.s.cXcptRecursions++;
5600 pVCpu->iem.s.uCurXcpt = u8Vector;
5601 pVCpu->iem.s.fCurXcpt = fFlags;
5602 pVCpu->iem.s.uCurXcptErr = uErr;
5603 pVCpu->iem.s.uCurXcptCr2 = uCr2;
5604
5605 /*
5606 * Extensive logging.
5607 */
5608#if defined(LOG_ENABLED) && defined(IN_RING3)
5609 if (LogIs3Enabled())
5610 {
5611 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_DR_MASK);
5612 PVM pVM = pVCpu->CTX_SUFF(pVM);
5613 char szRegs[4096];
5614 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
5615 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
5616 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
5617 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
5618 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
5619 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
5620 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
5621 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
5622 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
5623 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
5624 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
5625 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
5626 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
5627 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
5628 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
5629 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
5630 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
5631 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
5632 " efer=%016VR{efer}\n"
5633 " pat=%016VR{pat}\n"
5634 " sf_mask=%016VR{sf_mask}\n"
5635 "krnl_gs_base=%016VR{krnl_gs_base}\n"
5636 " lstar=%016VR{lstar}\n"
5637 " star=%016VR{star} cstar=%016VR{cstar}\n"
5638 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
5639 );
5640
5641 char szInstr[256];
5642 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
5643 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
5644 szInstr, sizeof(szInstr), NULL);
5645 Log3(("%s%s\n", szRegs, szInstr));
5646 }
5647#endif /* LOG_ENABLED */
5648
5649 /*
5650 * Call the mode specific worker function.
5651 */
5652 VBOXSTRICTRC rcStrict;
5653 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE))
5654 rcStrict = iemRaiseXcptOrIntInRealMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5655 else if (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_LMA)
5656 rcStrict = iemRaiseXcptOrIntInLongMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5657 else
5658 rcStrict = iemRaiseXcptOrIntInProtMode(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5659
5660 /* Flush the prefetch buffer. */
5661#ifdef IEM_WITH_CODE_TLB
5662 pVCpu->iem.s.pbInstrBuf = NULL;
5663#else
5664 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
5665#endif
5666
5667 /*
5668 * Unwind.
5669 */
5670 pVCpu->iem.s.cXcptRecursions--;
5671 pVCpu->iem.s.uCurXcpt = uPrevXcpt;
5672 pVCpu->iem.s.fCurXcpt = fPrevXcpt;
5673 Log(("iemRaiseXcptOrInt: returns %Rrc (vec=%#x); cs:rip=%04x:%RGv ss:rsp=%04x:%RGv cpl=%u depth=%d\n",
5674 VBOXSTRICTRC_VAL(rcStrict), u8Vector, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.esp, pVCpu->iem.s.uCpl,
5675 pVCpu->iem.s.cXcptRecursions + 1));
5676 return rcStrict;
5677}
5678
5679#ifdef IEM_WITH_SETJMP
5680/**
5681 * See iemRaiseXcptOrInt. Will not return.
5682 */
5683IEM_STATIC DECL_NO_RETURN(void)
5684iemRaiseXcptOrIntJmp(PVMCPUCC pVCpu,
5685 uint8_t cbInstr,
5686 uint8_t u8Vector,
5687 uint32_t fFlags,
5688 uint16_t uErr,
5689 uint64_t uCr2)
5690{
5691 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8Vector, fFlags, uErr, uCr2);
5692 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
5693}
5694#endif
5695
5696
5697/** \#DE - 00. */
5698DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDivideError(PVMCPUCC pVCpu)
5699{
5700 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5701}
5702
5703
5704/** \#DB - 01.
5705 * @note This automatically clears DR7.GD. */
5706DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDebugException(PVMCPUCC pVCpu)
5707{
5708 /** @todo set/clear RF. */
5709 pVCpu->cpum.GstCtx.dr[7] &= ~X86_DR7_GD;
5710 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DB, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5711}
5712
5713
5714/** \#BR - 05. */
5715DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseBoundRangeExceeded(PVMCPUCC pVCpu)
5716{
5717 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_BR, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5718}
5719
5720
5721/** \#UD - 06. */
5722DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseUndefinedOpcode(PVMCPUCC pVCpu)
5723{
5724 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5725}
5726
5727
5728/** \#NM - 07. */
5729DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseDeviceNotAvailable(PVMCPUCC pVCpu)
5730{
5731 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NM, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5732}
5733
5734
5735/** \#TS(err) - 0a. */
5736DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5737{
5738 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5739}
5740
5741
5742/** \#TS(tr) - 0a. */
5743DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultCurrentTSS(PVMCPUCC pVCpu)
5744{
5745 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5746 pVCpu->cpum.GstCtx.tr.Sel, 0);
5747}
5748
5749
5750/** \#TS(0) - 0a. */
5751DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFault0(PVMCPUCC pVCpu)
5752{
5753 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5754 0, 0);
5755}
5756
5757
5758/** \#TS(sel) - 0a. */
5759DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseTaskSwitchFaultBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5760{
5761 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_TS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5762 uSel & X86_SEL_MASK_OFF_RPL, 0);
5763}
5764
5765
5766/** \#NP(err) - 0b. */
5767DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5768{
5769 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5770}
5771
5772
5773/** \#NP(sel) - 0b. */
5774DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5775{
5776 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_NP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5777 uSel & ~X86_SEL_RPL, 0);
5778}
5779
5780
5781/** \#SS(seg) - 0c. */
5782DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentBySelector(PVMCPUCC pVCpu, uint16_t uSel)
5783{
5784 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5785 uSel & ~X86_SEL_RPL, 0);
5786}
5787
5788
5789/** \#SS(err) - 0c. */
5790DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseStackSelectorNotPresentWithErr(PVMCPUCC pVCpu, uint16_t uErr)
5791{
5792 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_SS, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5793}
5794
5795
5796/** \#GP(n) - 0d. */
5797DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault(PVMCPUCC pVCpu, uint16_t uErr)
5798{
5799 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErr, 0);
5800}
5801
5802
5803/** \#GP(0) - 0d. */
5804DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFault0(PVMCPUCC pVCpu)
5805{
5806 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5807}
5808
5809#ifdef IEM_WITH_SETJMP
5810/** \#GP(0) - 0d. */
5811DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseGeneralProtectionFault0Jmp(PVMCPUCC pVCpu)
5812{
5813 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5814}
5815#endif
5816
5817
5818/** \#GP(sel) - 0d. */
5819DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseGeneralProtectionFaultBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5820{
5821 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
5822 Sel & ~X86_SEL_RPL, 0);
5823}
5824
5825
5826/** \#GP(0) - 0d. */
5827DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseNotCanonical(PVMCPUCC pVCpu)
5828{
5829 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5830}
5831
5832
5833/** \#GP(sel) - 0d. */
5834DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBounds(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5835{
5836 NOREF(iSegReg); NOREF(fAccess);
5837 return iemRaiseXcptOrInt(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5838 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5839}
5840
5841#ifdef IEM_WITH_SETJMP
5842/** \#GP(sel) - 0d, longjmp. */
5843DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsJmp(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5844{
5845 NOREF(iSegReg); NOREF(fAccess);
5846 iemRaiseXcptOrIntJmp(pVCpu, 0, iSegReg == X86_SREG_SS ? X86_XCPT_SS : X86_XCPT_GP,
5847 IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5848}
5849#endif
5850
5851/** \#GP(sel) - 0d. */
5852DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorBoundsBySelector(PVMCPUCC pVCpu, RTSEL Sel)
5853{
5854 NOREF(Sel);
5855 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5856}
5857
5858#ifdef IEM_WITH_SETJMP
5859/** \#GP(sel) - 0d, longjmp. */
5860DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorBoundsBySelectorJmp(PVMCPUCC pVCpu, RTSEL Sel)
5861{
5862 NOREF(Sel);
5863 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5864}
5865#endif
5866
5867
5868/** \#GP(sel) - 0d. */
5869DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseSelectorInvalidAccess(PVMCPUCC pVCpu, uint32_t iSegReg, uint32_t fAccess)
5870{
5871 NOREF(iSegReg); NOREF(fAccess);
5872 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5873}
5874
5875#ifdef IEM_WITH_SETJMP
5876/** \#GP(sel) - 0d, longjmp. */
5877DECL_NO_INLINE(IEM_STATIC, DECL_NO_RETURN(void)) iemRaiseSelectorInvalidAccessJmp(PVMCPUCC pVCpu, uint32_t iSegReg,
5878 uint32_t fAccess)
5879{
5880 NOREF(iSegReg); NOREF(fAccess);
5881 iemRaiseXcptOrIntJmp(pVCpu, 0, X86_XCPT_GP, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5882}
5883#endif
5884
5885
5886/** \#PF(n) - 0e. */
5887DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaisePageFault(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5888{
5889 uint16_t uErr;
5890 switch (rc)
5891 {
5892 case VERR_PAGE_NOT_PRESENT:
5893 case VERR_PAGE_TABLE_NOT_PRESENT:
5894 case VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT:
5895 case VERR_PAGE_MAP_LEVEL4_NOT_PRESENT:
5896 uErr = 0;
5897 break;
5898
5899 default:
5900 AssertMsgFailed(("%Rrc\n", rc));
5901 RT_FALL_THRU();
5902 case VERR_ACCESS_DENIED:
5903 uErr = X86_TRAP_PF_P;
5904 break;
5905
5906 /** @todo reserved */
5907 }
5908
5909 if (pVCpu->iem.s.uCpl == 3)
5910 uErr |= X86_TRAP_PF_US;
5911
5912 if ( (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_CODE
5913 && ( (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
5914 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) ) )
5915 uErr |= X86_TRAP_PF_ID;
5916
5917#if 0 /* This is so much nonsense, really. Why was it done like that? */
5918    /* Note! RW access callers reporting a WRITE protection fault will clear
5919 the READ flag before calling. So, read-modify-write accesses (RW)
5920 can safely be reported as READ faults. */
5921 if ((fAccess & (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_TYPE_READ)) == IEM_ACCESS_TYPE_WRITE)
5922 uErr |= X86_TRAP_PF_RW;
5923#else
5924 if (fAccess & IEM_ACCESS_TYPE_WRITE)
5925 {
5926 /// @todo r=bird: bs3-cpu-basic-2 wants X86_TRAP_PF_RW for xchg and cmpxchg
5927 /// (regardless of outcome of the comparison in the latter case).
5928 //if (!(fAccess & IEM_ACCESS_TYPE_READ))
5929 uErr |= X86_TRAP_PF_RW;
5930 }
5931#endif
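    /* Editor's note (summary, not part of the original source): the error code
     * assembled above follows the architectural page-fault format:
     *     bit 0 (P)    - protection violation (set) vs. not-present page (clear),
     *     bit 1 (W/R)  - write access,
     *     bit 2 (U/S)  - access from CPL 3,
     *     bit 3 (RSVD) - reserved-bit violation (see the todo above),
     *     bit 4 (I/D)  - instruction fetch with PAE+NXE enabled. */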
5932
5933 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_PF, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR | IEM_XCPT_FLAGS_CR2,
5934 uErr, GCPtrWhere);
5935}
5936
5937#ifdef IEM_WITH_SETJMP
5938/** \#PF(n) - 0e, longjmp. */
5939IEM_STATIC DECL_NO_RETURN(void) iemRaisePageFaultJmp(PVMCPUCC pVCpu, RTGCPTR GCPtrWhere, uint32_t fAccess, int rc)
5940{
5941 longjmp(*CTX_SUFF(pVCpu->iem.s.pJmpBuf), VBOXSTRICTRC_VAL(iemRaisePageFault(pVCpu, GCPtrWhere, fAccess, rc)));
5942}
5943#endif
5944
5945
5946/** \#MF(0) - 10. */
5947DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseMathFault(PVMCPUCC pVCpu)
5948{
5949 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_MF, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5950}
5951
5952
5953/** \#AC(0) - 11. */
5954DECL_NO_INLINE(IEM_STATIC, VBOXSTRICTRC) iemRaiseAlignmentCheckException(PVMCPUCC pVCpu)
5955{
5956    return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_AC, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, 0, 0);
5957}
5958
5959
5960/**
5961 * Macro for calling iemCImplRaiseDivideError().
5962 *
5963 * This enables us to add/remove arguments and force different levels of
5964 * inlining as we wish.
5965 *
5966 * @return Strict VBox status code.
5967 */
5968#define IEMOP_RAISE_DIVIDE_ERROR() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseDivideError)
5969IEM_CIMPL_DEF_0(iemCImplRaiseDivideError)
5970{
5971 NOREF(cbInstr);
5972 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_DE, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5973}
5974
5975
5976/**
5977 * Macro for calling iemCImplRaiseInvalidLockPrefix().
5978 *
5979 * This enables us to add/remove arguments and force different levels of
5980 * inlining as we wish.
5981 *
5982 * @return Strict VBox status code.
5983 */
5984#define IEMOP_RAISE_INVALID_LOCK_PREFIX() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidLockPrefix)
5985IEM_CIMPL_DEF_0(iemCImplRaiseInvalidLockPrefix)
5986{
5987 NOREF(cbInstr);
5988 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
5989}
5990
5991
5992/**
5993 * Macro for calling iemCImplRaiseInvalidOpcode().
5994 *
5995 * This enables us to add/remove arguments and force different levels of
5996 * inlining as we wish.
5997 *
5998 * @return Strict VBox status code.
5999 */
6000#define IEMOP_RAISE_INVALID_OPCODE() IEM_MC_DEFER_TO_CIMPL_0(iemCImplRaiseInvalidOpcode)
6001IEM_CIMPL_DEF_0(iemCImplRaiseInvalidOpcode)
6002{
6003 NOREF(cbInstr);
6004 return iemRaiseXcptOrInt(pVCpu, 0, X86_XCPT_UD, IEM_XCPT_FLAGS_T_CPU_XCPT, 0, 0);
6005}
6006
6007
6008/** @} */
6009
6010
6011/*
6012 *
6013 * Helper routines.
6014 * Helper routines.
6015 * Helper routines.
6016 *
6017 */
6018
6019/**
6020 * Recalculates the effective operand size.
6021 *
6022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6023 */
6024IEM_STATIC void iemRecalEffOpSize(PVMCPUCC pVCpu)
6025{
6026 switch (pVCpu->iem.s.enmCpuMode)
6027 {
6028 case IEMMODE_16BIT:
6029 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_32BIT : IEMMODE_16BIT;
6030 break;
6031 case IEMMODE_32BIT:
6032 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SIZE_OP ? IEMMODE_16BIT : IEMMODE_32BIT;
6033 break;
6034 case IEMMODE_64BIT:
6035 switch (pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP))
6036 {
6037 case 0:
6038 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize;
6039 break;
6040 case IEM_OP_PRF_SIZE_OP:
6041 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6042 break;
6043 case IEM_OP_PRF_SIZE_REX_W:
6044 case IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP:
6045 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6046 break;
6047 }
6048 break;
6049 default:
6050 AssertFailed();
6051 }
6052}
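
/*
 * Illustrative sketch only: how the switch above resolves for a couple of
 * prefix combinations (the combinations are assumptions for the example).
 *
 * @code
 *      // 32-bit mode, 66h operand-size prefix seen:
 *      //   fPrefixes & IEM_OP_PRF_SIZE_OP          -> enmEffOpSize = IEMMODE_16BIT
 *      // 64-bit mode, both REX.W and 66h seen:
 *      //   REX.W takes precedence over 66h         -> enmEffOpSize = IEMMODE_64BIT
 * @endcode
 */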
6053
6054
6055/**
6056 * Sets the default operand size to 64-bit and recalculates the effective
6057 * operand size.
6058 *
6059 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6060 */
6061IEM_STATIC void iemRecalEffOpSize64Default(PVMCPUCC pVCpu)
6062{
6063 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6064 pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT;
6065 if ((pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_REX_W | IEM_OP_PRF_SIZE_OP)) != IEM_OP_PRF_SIZE_OP)
6066 pVCpu->iem.s.enmEffOpSize = IEMMODE_64BIT;
6067 else
6068 pVCpu->iem.s.enmEffOpSize = IEMMODE_16BIT;
6069}
6070
6071
6072/*
6073 *
6074 * Common opcode decoders.
6075 * Common opcode decoders.
6076 * Common opcode decoders.
6077 *
6078 */
6079//#include <iprt/mem.h>
6080
6081/**
6082 * Used to add extra details about a stub case.
6083 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6084 */
6085IEM_STATIC void iemOpStubMsg2(PVMCPUCC pVCpu)
6086{
6087#if defined(LOG_ENABLED) && defined(IN_RING3)
6088 PVM pVM = pVCpu->CTX_SUFF(pVM);
6089 char szRegs[4096];
6090 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),
6091 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"
6092 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"
6093 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"
6094 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"
6095 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"
6096 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"
6097 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"
6098 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"
6099 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"
6100 "gs={%04VR{gs} base=%016VR{gs_base} limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"
6101 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"
6102 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"
6103 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"
6104 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"
6105 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"
6106 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"
6107 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"
6108 " efer=%016VR{efer}\n"
6109 " pat=%016VR{pat}\n"
6110 " sf_mask=%016VR{sf_mask}\n"
6111 "krnl_gs_base=%016VR{krnl_gs_base}\n"
6112 " lstar=%016VR{lstar}\n"
6113 " star=%016VR{star} cstar=%016VR{cstar}\n"
6114 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"
6115 );
6116
6117 char szInstr[256];
6118 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,
6119 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
6120 szInstr, sizeof(szInstr), NULL);
6121
6122 RTAssertMsg2Weak("%s%s\n", szRegs, szInstr);
6123#else
6124 RTAssertMsg2Weak("cs:rip=%04x:%RX64\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip);
6125#endif
6126}
6127
6128/**
6129 * Complains about a stub.
6130 *
6131 * Two versions of this macro are provided: one for daily use and one for use
6132 * when working on IEM.
6133 */
6134#if 0
6135# define IEMOP_BITCH_ABOUT_STUB() \
6136 do { \
6137 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__); \
6138 iemOpStubMsg2(pVCpu); \
6139 RTAssertPanic(); \
6140 } while (0)
6141#else
6142# define IEMOP_BITCH_ABOUT_STUB() Log(("Stub: %s (line %d)\n", __FUNCTION__, __LINE__));
6143#endif
6144
6145/** Stubs an opcode. */
6146#define FNIEMOP_STUB(a_Name) \
6147 FNIEMOP_DEF(a_Name) \
6148 { \
6149 RT_NOREF_PV(pVCpu); \
6150 IEMOP_BITCH_ABOUT_STUB(); \
6151 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6152 } \
6153 typedef int ignore_semicolon
6154
6155/** Stubs an opcode. */
6156#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
6157 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6158 { \
6159 RT_NOREF_PV(pVCpu); \
6160 RT_NOREF_PV(a_Name0); \
6161 IEMOP_BITCH_ABOUT_STUB(); \
6162 return VERR_IEM_INSTR_NOT_IMPLEMENTED; \
6163 } \
6164 typedef int ignore_semicolon
6165
6166/** Stubs an opcode which currently should raise \#UD. */
6167#define FNIEMOP_UD_STUB(a_Name) \
6168 FNIEMOP_DEF(a_Name) \
6169 { \
6170 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6171 return IEMOP_RAISE_INVALID_OPCODE(); \
6172 } \
6173 typedef int ignore_semicolon
6174
6175/** Stubs an opcode which currently should raise \#UD. */
6176#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
6177 FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
6178 { \
6179 RT_NOREF_PV(pVCpu); \
6180 RT_NOREF_PV(a_Name0); \
6181 Log(("Unsupported instruction %Rfn\n", __FUNCTION__)); \
6182 return IEMOP_RAISE_INVALID_OPCODE(); \
6183 } \
6184 typedef int ignore_semicolon
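
/*
 * Illustrative sketch only: how the stub macros above are meant to be used.
 * The opcode name below is a hypothetical placeholder, not a real entry in
 * the opcode tables.
 *
 * @code
 *      // Declares a decoder function that logs the stub and returns
 *      // VERR_IEM_INSTR_NOT_IMPLEMENTED when hit:
 *      FNIEMOP_STUB(iemOp_SomeUnimplementedOpcode);
 * @endcode
 */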
6185
6186
6187
6188/** @name Register Access.
6189 * @{
6190 */
6191
6192/**
6193 * Gets a reference (pointer) to the specified hidden segment register.
6194 *
6195 * @returns Hidden register reference.
6196 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6197 * @param iSegReg The segment register.
6198 */
6199IEM_STATIC PCPUMSELREG iemSRegGetHid(PVMCPUCC pVCpu, uint8_t iSegReg)
6200{
6201 Assert(iSegReg < X86_SREG_COUNT);
6202 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6203 PCPUMSELREG pSReg = &pVCpu->cpum.GstCtx.aSRegs[iSegReg];
6204
6205 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6206 return pSReg;
6207}
6208
6209
6210/**
6211 * Ensures that the given hidden segment register is up to date.
6212 *
6213 * @returns Hidden register reference.
6214 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6215 * @param pSReg The segment register.
6216 */
6217IEM_STATIC PCPUMSELREG iemSRegUpdateHid(PVMCPUCC pVCpu, PCPUMSELREG pSReg)
6218{
6219 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, pSReg));
6220 NOREF(pVCpu);
6221 return pSReg;
6222}
6223
6224
6225/**
6226 * Gets a reference (pointer) to the specified segment register (the selector
6227 * value).
6228 *
6229 * @returns Pointer to the selector variable.
6230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6231 * @param iSegReg The segment register.
6232 */
6233DECLINLINE(uint16_t *) iemSRegRef(PVMCPUCC pVCpu, uint8_t iSegReg)
6234{
6235 Assert(iSegReg < X86_SREG_COUNT);
6236 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6237 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6238}
6239
6240
6241/**
6242 * Fetches the selector value of a segment register.
6243 *
6244 * @returns The selector value.
6245 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6246 * @param iSegReg The segment register.
6247 */
6248DECLINLINE(uint16_t) iemSRegFetchU16(PVMCPUCC pVCpu, uint8_t iSegReg)
6249{
6250 Assert(iSegReg < X86_SREG_COUNT);
6251 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6252 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].Sel;
6253}
6254
6255
6256/**
6257 * Fetches the base address value of a segment register.
6258 *
6259 * @returns The base address value.
6260 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6261 * @param iSegReg The segment register.
6262 */
6263DECLINLINE(uint64_t) iemSRegBaseFetchU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6264{
6265 Assert(iSegReg < X86_SREG_COUNT);
6266 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6267 return pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6268}
6269
6270
6271/**
6272 * Gets a reference (pointer) to the specified general purpose register.
6273 *
6274 * @returns Register reference.
6275 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6276 * @param iReg The general purpose register.
6277 */
6278DECLINLINE(void *) iemGRegRef(PVMCPUCC pVCpu, uint8_t iReg)
6279{
6280 Assert(iReg < 16);
6281 return &pVCpu->cpum.GstCtx.aGRegs[iReg];
6282}
6283
6284
6285/**
6286 * Gets a reference (pointer) to the specified 8-bit general purpose register.
6287 *
6288 * Because of AH, CH, DH and BH we cannot use iemGRegRef directly here.
6289 *
6290 * @returns Register reference.
6291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6292 * @param iReg The register.
6293 */
6294DECLINLINE(uint8_t *) iemGRegRefU8(PVMCPUCC pVCpu, uint8_t iReg)
6295{
6296 if (iReg < 4 || (pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX))
6297 {
6298 Assert(iReg < 16);
6299 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u8;
6300 }
6301 /* high 8-bit register. */
6302 Assert(iReg < 8);
6303 return &pVCpu->cpum.GstCtx.aGRegs[iReg & 3].bHi;
6304}
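
/*
 * Illustrative sketch only: the REX dependency above is what distinguishes
 * the legacy high-byte registers from the newer low-byte ones (the register
 * index is an assumption for the example).
 *
 * @code
 *      // iReg == 4 without any REX prefix -> &aGRegs[0].bHi, i.e. AH.
 *      // iReg == 4 with a REX prefix      -> &aGRegs[4].u8,  i.e. SPL.
 * @endcode
 */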
6305
6306
6307/**
6308 * Gets a reference (pointer) to the specified 16-bit general purpose register.
6309 *
6310 * @returns Register reference.
6311 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6312 * @param iReg The register.
6313 */
6314DECLINLINE(uint16_t *) iemGRegRefU16(PVMCPUCC pVCpu, uint8_t iReg)
6315{
6316 Assert(iReg < 16);
6317 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6318}
6319
6320
6321/**
6322 * Gets a reference (pointer) to the specified 32-bit general purpose register.
6323 *
6324 * @returns Register reference.
6325 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6326 * @param iReg The register.
6327 */
6328DECLINLINE(uint32_t *) iemGRegRefU32(PVMCPUCC pVCpu, uint8_t iReg)
6329{
6330 Assert(iReg < 16);
6331 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6332}
6333
6334
6335/**
6336 * Gets a reference (pointer) to the specified 64-bit general purpose register.
6337 *
6338 * @returns Register reference.
6339 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6340 * @param iReg The register.
6341 */
6342DECLINLINE(uint64_t *) iemGRegRefU64(PVMCPUCC pVCpu, uint8_t iReg)
6343{
6344 Assert(iReg < 16);
6345 return &pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6346}
6347
6348
6349/**
6350 * Gets a reference (pointer) to the specified segment register's base address.
6351 *
6352 * @returns Segment register base address reference.
6353 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6354 * @param iSegReg The segment selector.
6355 */
6356DECLINLINE(uint64_t *) iemSRegBaseRefU64(PVMCPUCC pVCpu, uint8_t iSegReg)
6357{
6358 Assert(iSegReg < X86_SREG_COUNT);
6359 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
6360 return &pVCpu->cpum.GstCtx.aSRegs[iSegReg].u64Base;
6361}
6362
6363
6364/**
6365 * Fetches the value of an 8-bit general purpose register.
6366 *
6367 * @returns The register value.
6368 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6369 * @param iReg The register.
6370 */
6371DECLINLINE(uint8_t) iemGRegFetchU8(PVMCPUCC pVCpu, uint8_t iReg)
6372{
6373 return *iemGRegRefU8(pVCpu, iReg);
6374}
6375
6376
6377/**
6378 * Fetches the value of a 16-bit general purpose register.
6379 *
6380 * @returns The register value.
6381 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6382 * @param iReg The register.
6383 */
6384DECLINLINE(uint16_t) iemGRegFetchU16(PVMCPUCC pVCpu, uint8_t iReg)
6385{
6386 Assert(iReg < 16);
6387 return pVCpu->cpum.GstCtx.aGRegs[iReg].u16;
6388}
6389
6390
6391/**
6392 * Fetches the value of a 32-bit general purpose register.
6393 *
6394 * @returns The register value.
6395 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6396 * @param iReg The register.
6397 */
6398DECLINLINE(uint32_t) iemGRegFetchU32(PVMCPUCC pVCpu, uint8_t iReg)
6399{
6400 Assert(iReg < 16);
6401 return pVCpu->cpum.GstCtx.aGRegs[iReg].u32;
6402}
6403
6404
6405/**
6406 * Fetches the value of a 64-bit general purpose register.
6407 *
6408 * @returns The register value.
6409 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6410 * @param iReg The register.
6411 */
6412DECLINLINE(uint64_t) iemGRegFetchU64(PVMCPUCC pVCpu, uint8_t iReg)
6413{
6414 Assert(iReg < 16);
6415 return pVCpu->cpum.GstCtx.aGRegs[iReg].u64;
6416}
6417
6418
6419/**
6420 * Adds an 8-bit signed jump offset to RIP/EIP/IP.
6421 *
6422 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6423 * segment limit.
6424 *
6425 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6426 * @param offNextInstr The offset of the next instruction.
6427 */
6428IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS8(PVMCPUCC pVCpu, int8_t offNextInstr)
6429{
6430 switch (pVCpu->iem.s.enmEffOpSize)
6431 {
6432 case IEMMODE_16BIT:
6433 {
6434 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6435 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6436 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6437 return iemRaiseGeneralProtectionFault0(pVCpu);
6438 pVCpu->cpum.GstCtx.rip = uNewIp;
6439 break;
6440 }
6441
6442 case IEMMODE_32BIT:
6443 {
6444 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6445 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6446
6447 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6448 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6449 return iemRaiseGeneralProtectionFault0(pVCpu);
6450 pVCpu->cpum.GstCtx.rip = uNewEip;
6451 break;
6452 }
6453
6454 case IEMMODE_64BIT:
6455 {
6456 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6457
6458 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6459 if (!IEM_IS_CANONICAL(uNewRip))
6460 return iemRaiseGeneralProtectionFault0(pVCpu);
6461 pVCpu->cpum.GstCtx.rip = uNewRip;
6462 break;
6463 }
6464
6465 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6466 }
6467
6468 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6469
6470#ifndef IEM_WITH_CODE_TLB
6471 /* Flush the prefetch buffer. */
6472 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6473#endif
6474
6475 return VINF_SUCCESS;
6476}
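
/*
 * Illustrative sketch only: typical use from a decoder function once the
 * rel8 displacement has been fetched.  The displacement value is an
 * assumption for the example.
 *
 * @code
 *      int8_t const offRel = -2;   // hypothetical rel8; with a 2-byte JMP encoding this targets the jump itself
 *      VBOXSTRICTRC rcStrict = iemRegRipRelativeJumpS8(pVCpu, offRel);
 *      // rcStrict is VINF_SUCCESS, or the #GP(0) raised for a bad target.
 * @endcode
 */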
6477
6478
6479/**
6480 * Adds a 16-bit signed jump offset to RIP/EIP/IP.
6481 *
6482 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6483 * segment limit.
6484 *
6485 * @returns Strict VBox status code.
6486 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6487 * @param offNextInstr The offset of the next instruction.
6488 */
6489IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS16(PVMCPUCC pVCpu, int16_t offNextInstr)
6490{
6491 Assert(pVCpu->iem.s.enmEffOpSize == IEMMODE_16BIT);
6492
6493 uint16_t uNewIp = pVCpu->cpum.GstCtx.ip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6494 if ( uNewIp > pVCpu->cpum.GstCtx.cs.u32Limit
6495 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6496 return iemRaiseGeneralProtectionFault0(pVCpu);
6497 /** @todo Test 16-bit jump in 64-bit mode. possible? */
6498 pVCpu->cpum.GstCtx.rip = uNewIp;
6499 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6500
6501#ifndef IEM_WITH_CODE_TLB
6502 /* Flush the prefetch buffer. */
6503 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6504#endif
6505
6506 return VINF_SUCCESS;
6507}
6508
6509
6510/**
6511 * Adds a 32-bit signed jump offset to RIP/EIP/IP.
6512 *
6513 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6514 * segment limit.
6515 *
6516 * @returns Strict VBox status code.
6517 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6518 * @param offNextInstr The offset of the next instruction.
6519 */
6520IEM_STATIC VBOXSTRICTRC iemRegRipRelativeJumpS32(PVMCPUCC pVCpu, int32_t offNextInstr)
6521{
6522 Assert(pVCpu->iem.s.enmEffOpSize != IEMMODE_16BIT);
6523
6524 if (pVCpu->iem.s.enmEffOpSize == IEMMODE_32BIT)
6525 {
6526 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX); Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6527
6528 uint32_t uNewEip = pVCpu->cpum.GstCtx.eip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6529 if (uNewEip > pVCpu->cpum.GstCtx.cs.u32Limit)
6530 return iemRaiseGeneralProtectionFault0(pVCpu);
6531 pVCpu->cpum.GstCtx.rip = uNewEip;
6532 }
6533 else
6534 {
6535 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6536
6537 uint64_t uNewRip = pVCpu->cpum.GstCtx.rip + offNextInstr + IEM_GET_INSTR_LEN(pVCpu);
6538 if (!IEM_IS_CANONICAL(uNewRip))
6539 return iemRaiseGeneralProtectionFault0(pVCpu);
6540 pVCpu->cpum.GstCtx.rip = uNewRip;
6541 }
6542 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6543
6544#ifndef IEM_WITH_CODE_TLB
6545 /* Flush the prefetch buffer. */
6546 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6547#endif
6548
6549 return VINF_SUCCESS;
6550}
6551
6552
6553/**
6554 * Performs a near jump to the specified address.
6555 *
6556 * May raise a \#GP(0) if the new RIP is non-canonical or outside the code
6557 * segment limit.
6558 *
6559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6560 * @param uNewRip The new RIP value.
6561 */
6562IEM_STATIC VBOXSTRICTRC iemRegRipJump(PVMCPUCC pVCpu, uint64_t uNewRip)
6563{
6564 switch (pVCpu->iem.s.enmEffOpSize)
6565 {
6566 case IEMMODE_16BIT:
6567 {
6568 Assert(uNewRip <= UINT16_MAX);
6569 if ( uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit
6570 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) /* no need to check for non-canonical. */
6571 return iemRaiseGeneralProtectionFault0(pVCpu);
6572 /** @todo Test 16-bit jump in 64-bit mode. */
6573 pVCpu->cpum.GstCtx.rip = uNewRip;
6574 break;
6575 }
6576
6577 case IEMMODE_32BIT:
6578 {
6579 Assert(uNewRip <= UINT32_MAX);
6580 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6581 Assert(pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT);
6582
6583 if (uNewRip > pVCpu->cpum.GstCtx.cs.u32Limit)
6584 return iemRaiseGeneralProtectionFault0(pVCpu);
6585 pVCpu->cpum.GstCtx.rip = uNewRip;
6586 break;
6587 }
6588
6589 case IEMMODE_64BIT:
6590 {
6591 Assert(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
6592
6593 if (!IEM_IS_CANONICAL(uNewRip))
6594 return iemRaiseGeneralProtectionFault0(pVCpu);
6595 pVCpu->cpum.GstCtx.rip = uNewRip;
6596 break;
6597 }
6598
6599 IEM_NOT_REACHED_DEFAULT_CASE_RET();
6600 }
6601
6602 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6603
6604#ifndef IEM_WITH_CODE_TLB
6605 /* Flush the prefetch buffer. */
6606 pVCpu->iem.s.cbOpcode = IEM_GET_INSTR_LEN(pVCpu);
6607#endif
6608
6609 return VINF_SUCCESS;
6610}
6611
6612
6613/**
6614 * Gets the address of the top of the stack.
6615 *
6616 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6617 */
6618DECLINLINE(RTGCPTR) iemRegGetEffRsp(PCVMCPU pVCpu)
6619{
6620 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6621 return pVCpu->cpum.GstCtx.rsp;
6622 if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6623 return pVCpu->cpum.GstCtx.esp;
6624 return pVCpu->cpum.GstCtx.sp;
6625}
6626
6627
6628/**
6629 * Updates the RIP/EIP/IP to point to the next instruction.
6630 *
6631 * This function leaves the EFLAGS.RF flag alone.
6632 *
6633 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6634 * @param cbInstr The number of bytes to add.
6635 */
6636IEM_STATIC void iemRegAddToRipKeepRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6637{
6638 switch (pVCpu->iem.s.enmCpuMode)
6639 {
6640 case IEMMODE_16BIT:
6641 Assert(pVCpu->cpum.GstCtx.rip <= UINT16_MAX);
6642 pVCpu->cpum.GstCtx.eip += cbInstr;
6643 pVCpu->cpum.GstCtx.eip &= UINT32_C(0xffff);
6644 break;
6645
6646 case IEMMODE_32BIT:
6647 pVCpu->cpum.GstCtx.eip += cbInstr;
6648 Assert(pVCpu->cpum.GstCtx.rip <= UINT32_MAX);
6649 break;
6650
6651 case IEMMODE_64BIT:
6652 pVCpu->cpum.GstCtx.rip += cbInstr;
6653 break;
6654 default: AssertFailed();
6655 }
6656}
6657
6658
6659#if 0
6660/**
6661 * Updates the RIP/EIP/IP to point to the next instruction.
6662 *
6663 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6664 */
6665IEM_STATIC void iemRegUpdateRipKeepRF(PVMCPUCC pVCpu)
6666{
6667 return iemRegAddToRipKeepRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6668}
6669#endif
6670
6671
6672
6673/**
6674 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6675 *
6676 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6677 * @param cbInstr The number of bytes to add.
6678 */
6679IEM_STATIC void iemRegAddToRipAndClearRF(PVMCPUCC pVCpu, uint8_t cbInstr)
6680{
6681 pVCpu->cpum.GstCtx.eflags.Bits.u1RF = 0;
6682
6683 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
6684#if ARCH_BITS >= 64
6685 static uint64_t const s_aRipMasks[] = { UINT64_C(0xffffffff), UINT64_C(0xffffffff), UINT64_MAX };
6686 Assert(pVCpu->cpum.GstCtx.rip <= s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode]);
6687 pVCpu->cpum.GstCtx.rip = (pVCpu->cpum.GstCtx.rip + cbInstr) & s_aRipMasks[(unsigned)pVCpu->iem.s.enmCpuMode];
6688#else
6689 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6690 pVCpu->cpum.GstCtx.rip += cbInstr;
6691 else
6692 pVCpu->cpum.GstCtx.eip += cbInstr;
6693#endif
6694}
6695
6696
6697/**
6698 * Updates the RIP/EIP/IP to point to the next instruction and clears EFLAGS.RF.
6699 *
6700 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6701 */
6702IEM_STATIC void iemRegUpdateRipAndClearRF(PVMCPUCC pVCpu)
6703{
6704 return iemRegAddToRipAndClearRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
6705}
6706
6707
6708/**
6709 * Adds to the stack pointer.
6710 *
6711 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6712 * @param cbToAdd The number of bytes to add (8-bit!).
6713 */
6714DECLINLINE(void) iemRegAddToRsp(PVMCPUCC pVCpu, uint8_t cbToAdd)
6715{
6716 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6717 pVCpu->cpum.GstCtx.rsp += cbToAdd;
6718 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6719 pVCpu->cpum.GstCtx.esp += cbToAdd;
6720 else
6721 pVCpu->cpum.GstCtx.sp += cbToAdd;
6722}
6723
6724
6725/**
6726 * Subtracts from the stack pointer.
6727 *
6728 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6729 * @param cbToSub The number of bytes to subtract (8-bit!).
6730 */
6731DECLINLINE(void) iemRegSubFromRsp(PVMCPUCC pVCpu, uint8_t cbToSub)
6732{
6733 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6734 pVCpu->cpum.GstCtx.rsp -= cbToSub;
6735 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6736 pVCpu->cpum.GstCtx.esp -= cbToSub;
6737 else
6738 pVCpu->cpum.GstCtx.sp -= cbToSub;
6739}
6740
6741
6742/**
6743 * Adds to the temporary stack pointer.
6744 *
6745 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6746 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6747 * @param cbToAdd The number of bytes to add (16-bit).
6748 */
6749DECLINLINE(void) iemRegAddToRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToAdd)
6750{
6751 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6752 pTmpRsp->u += cbToAdd;
6753 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6754 pTmpRsp->DWords.dw0 += cbToAdd;
6755 else
6756 pTmpRsp->Words.w0 += cbToAdd;
6757}
6758
6759
6760/**
6761 * Subtracts from the temporary stack pointer.
6762 *
6763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6764 * @param pTmpRsp The temporary SP/ESP/RSP to update.
6765 * @param cbToSub The number of bytes to subtract.
6766 * @remarks The @a cbToSub argument *MUST* be 16-bit, iemCImpl_enter is
6767 * expecting that.
6768 */
6769DECLINLINE(void) iemRegSubFromRspEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint16_t cbToSub)
6770{
6771 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6772 pTmpRsp->u -= cbToSub;
6773 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6774 pTmpRsp->DWords.dw0 -= cbToSub;
6775 else
6776 pTmpRsp->Words.w0 -= cbToSub;
6777}
6778
6779
6780/**
6781 * Calculates the effective stack address for a push of the specified size as
6782 * well as the new RSP value (upper bits may be masked).
6783 *
6784 * @returns Effective stack address for the push.
6785 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6786 * @param cbItem The size of the stack item to push.
6787 * @param puNewRsp Where to return the new RSP value.
6788 */
6789DECLINLINE(RTGCPTR) iemRegGetRspForPush(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6790{
6791 RTUINT64U uTmpRsp;
6792 RTGCPTR GCPtrTop;
6793 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6794
6795 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6796 GCPtrTop = uTmpRsp.u -= cbItem;
6797 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6798 GCPtrTop = uTmpRsp.DWords.dw0 -= cbItem;
6799 else
6800 GCPtrTop = uTmpRsp.Words.w0 -= cbItem;
6801 *puNewRsp = uTmpRsp.u;
6802 return GCPtrTop;
6803}
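
/*
 * Illustrative sketch only: the two-step pattern the stack push helpers
 * build on.  RSP is committed only once the memory write has succeeded, so
 * a faulting push leaves the stack pointer untouched.
 *
 * @code
 *      uint64_t uNewRsp;
 *      RTGCPTR  GCPtrTop = iemRegGetRspForPush(pVCpu, 4 /*cbItem*/, &uNewRsp);
 *      // ... write the 32-bit value at SS:GCPtrTop; bail out on failure ...
 *      pVCpu->cpum.GstCtx.rsp = uNewRsp;   // commit only after a successful write
 * @endcode
 */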
6804
6805
6806/**
6807 * Gets the current stack pointer and calculates the value after a pop of the
6808 * specified size.
6809 *
6810 * @returns Current stack pointer.
6811 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6812 * @param cbItem The size of the stack item to pop.
6813 * @param puNewRsp Where to return the new RSP value.
6814 */
6815DECLINLINE(RTGCPTR) iemRegGetRspForPop(PCVMCPU pVCpu, uint8_t cbItem, uint64_t *puNewRsp)
6816{
6817 RTUINT64U uTmpRsp;
6818 RTGCPTR GCPtrTop;
6819 uTmpRsp.u = pVCpu->cpum.GstCtx.rsp;
6820
6821 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6822 {
6823 GCPtrTop = uTmpRsp.u;
6824 uTmpRsp.u += cbItem;
6825 }
6826 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6827 {
6828 GCPtrTop = uTmpRsp.DWords.dw0;
6829 uTmpRsp.DWords.dw0 += cbItem;
6830 }
6831 else
6832 {
6833 GCPtrTop = uTmpRsp.Words.w0;
6834 uTmpRsp.Words.w0 += cbItem;
6835 }
6836 *puNewRsp = uTmpRsp.u;
6837 return GCPtrTop;
6838}
6839
6840
6841/**
6842 * Calculates the effective stack address for a push of the specified size as
6843 * well as the new temporary RSP value (upper bits may be masked).
6844 *
6845 * @returns Effective stack address for the push.
6846 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6847 * @param pTmpRsp The temporary stack pointer. This is updated.
6848 * @param cbItem The size of the stack item to push.
6849 */
6850DECLINLINE(RTGCPTR) iemRegGetRspForPushEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6851{
6852 RTGCPTR GCPtrTop;
6853
6854 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6855 GCPtrTop = pTmpRsp->u -= cbItem;
6856 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6857 GCPtrTop = pTmpRsp->DWords.dw0 -= cbItem;
6858 else
6859 GCPtrTop = pTmpRsp->Words.w0 -= cbItem;
6860 return GCPtrTop;
6861}
6862
6863
6864/**
6865 * Gets the effective stack address for a pop of the specified size and
6866 * calculates and updates the temporary RSP.
6867 *
6868 * @returns Current stack pointer.
6869 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6870 * @param pTmpRsp The temporary stack pointer. This is updated.
6871 * @param cbItem The size of the stack item to pop.
6872 */
6873DECLINLINE(RTGCPTR) iemRegGetRspForPopEx(PCVMCPU pVCpu, PRTUINT64U pTmpRsp, uint8_t cbItem)
6874{
6875 RTGCPTR GCPtrTop;
6876 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
6877 {
6878 GCPtrTop = pTmpRsp->u;
6879 pTmpRsp->u += cbItem;
6880 }
6881 else if (pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
6882 {
6883 GCPtrTop = pTmpRsp->DWords.dw0;
6884 pTmpRsp->DWords.dw0 += cbItem;
6885 }
6886 else
6887 {
6888 GCPtrTop = pTmpRsp->Words.w0;
6889 pTmpRsp->Words.w0 += cbItem;
6890 }
6891 return GCPtrTop;
6892}
6893
6894/** @} */
6895
6896
6897/** @name FPU access and helpers.
6898 *
6899 * @{
6900 */
6901
6902
6903/**
6904 * Hook for preparing to use the host FPU.
6905 *
6906 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6907 *
6908 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6909 */
6910DECLINLINE(void) iemFpuPrepareUsage(PVMCPUCC pVCpu)
6911{
6912#ifdef IN_RING3
6913 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6914#else
6915 CPUMRZFpuStatePrepareHostCpuForUse(pVCpu);
6916#endif
6917 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6918}
6919
6920
6921/**
6922 * Hook for preparing to use the host FPU for SSE.
6923 *
6924 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6925 *
6926 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6927 */
6928DECLINLINE(void) iemFpuPrepareUsageSse(PVMCPUCC pVCpu)
6929{
6930 iemFpuPrepareUsage(pVCpu);
6931}
6932
6933
6934/**
6935 * Hook for preparing to use the host FPU for AVX.
6936 *
6937 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6938 *
6939 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6940 */
6941DECLINLINE(void) iemFpuPrepareUsageAvx(PVMCPUCC pVCpu)
6942{
6943 iemFpuPrepareUsage(pVCpu);
6944}
6945
6946
6947/**
6948 * Hook for actualizing the guest FPU state before the interpreter reads it.
6949 *
6950 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6951 *
6952 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6953 */
6954DECLINLINE(void) iemFpuActualizeStateForRead(PVMCPUCC pVCpu)
6955{
6956#ifdef IN_RING3
6957 NOREF(pVCpu);
6958#else
6959 CPUMRZFpuStateActualizeForRead(pVCpu);
6960#endif
6961 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6962}
6963
6964
6965/**
6966 * Hook for actualizing the guest FPU state before the interpreter changes it.
6967 *
6968 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6969 *
6970 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6971 */
6972DECLINLINE(void) iemFpuActualizeStateForChange(PVMCPUCC pVCpu)
6973{
6974#ifdef IN_RING3
6975 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
6976#else
6977 CPUMRZFpuStateActualizeForChange(pVCpu);
6978#endif
6979 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6980}
6981
6982
6983/**
6984 * Hook for actualizing the guest XMM0..15 and MXCSR register state for read
6985 * only.
6986 *
6987 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
6988 *
6989 * @param pVCpu The cross context virtual CPU structure of the calling thread.
6990 */
6991DECLINLINE(void) iemFpuActualizeSseStateForRead(PVMCPUCC pVCpu)
6992{
6993#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
6994 NOREF(pVCpu);
6995#else
6996 CPUMRZFpuStateActualizeSseForRead(pVCpu);
6997#endif
6998 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
6999}
7000
7001
7002/**
7003 * Hook for actualizing the guest XMM0..15 and MXCSR register state for
7004 * read+write.
7005 *
7006 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7007 *
7008 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7009 */
7010DECLINLINE(void) iemFpuActualizeSseStateForChange(PVMCPUCC pVCpu)
7011{
7012#if defined(IN_RING3) || defined(VBOX_WITH_KERNEL_USING_XMM)
7013 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7014#else
7015 CPUMRZFpuStateActualizeForChange(pVCpu);
7016#endif
7017 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7018
7019 /* Make sure any changes are loaded the next time around. */
7020 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_SSE;
7021}
7022
7023
7024/**
7025 * Hook for actualizing the guest YMM0..15 and MXCSR register state for read
7026 * only.
7027 *
7028 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7029 *
7030 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7031 */
7032DECLINLINE(void) iemFpuActualizeAvxStateForRead(PVMCPUCC pVCpu)
7033{
7034#ifdef IN_RING3
7035 NOREF(pVCpu);
7036#else
7037 CPUMRZFpuStateActualizeAvxForRead(pVCpu);
7038#endif
7039 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7040}
7041
7042
7043/**
7044 * Hook for actualizing the guest YMM0..15 and MXCSR register state for
7045 * read+write.
7046 *
7047 * This is necessary in ring-0 and raw-mode context (nop in ring-3).
7048 *
7049 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7050 */
7051DECLINLINE(void) iemFpuActualizeAvxStateForChange(PVMCPUCC pVCpu)
7052{
7053#ifdef IN_RING3
7054 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_FPU_REM);
7055#else
7056 CPUMRZFpuStateActualizeForChange(pVCpu);
7057#endif
7058 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_X87 | CPUMCTX_EXTRN_SSE_AVX | CPUMCTX_EXTRN_OTHER_XSAVE | CPUMCTX_EXTRN_XCRx);
7059
7060 /* Just assume we're going to make changes to the SSE and YMM_HI parts. */
7061 pVCpu->cpum.GstCtx.XState.Hdr.bmXState |= XSAVE_C_YMM | XSAVE_C_SSE;
7062}
7063
7064
7065/**
7066 * Stores a QNaN value into a FPU register.
7067 *
7068 * @param pReg Pointer to the register.
7069 */
7070DECLINLINE(void) iemFpuStoreQNan(PRTFLOAT80U pReg)
7071{
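    /* Writes the x87 "real indefinite" QNaN: sign=1, exponent all ones,
       mantissa = 0xC000000000000000 (only the integer bit and the top
       fraction bit set). */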
7072 pReg->au32[0] = UINT32_C(0x00000000);
7073 pReg->au32[1] = UINT32_C(0xc0000000);
7074 pReg->au16[4] = UINT16_C(0xffff);
7075}
7076
7077
7078/**
7079 * Updates the FOP, FPU.CS and FPUIP registers.
7080 *
7081 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7082 * @param pFpuCtx The FPU context.
7083 */
7084DECLINLINE(void) iemFpuUpdateOpcodeAndIpWorker(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx)
7085{
7086 Assert(pVCpu->iem.s.uFpuOpcode != UINT16_MAX);
7087 pFpuCtx->FOP = pVCpu->iem.s.uFpuOpcode;
7088 /** @todo x87.CS and FPUIP need to be kept separately. */
7089 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7090 {
7091 /** @todo Testcase: making assumptions about how FPUIP and FPUDP are handled
7092 * happens in real mode here based on the fnsave and fnstenv images. */
7093 pFpuCtx->CS = 0;
7094 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.eip | ((uint32_t)pVCpu->cpum.GstCtx.cs.Sel << 4);
7095 }
7096 else if (!IEM_IS_LONG_MODE(pVCpu))
7097 {
7098 pFpuCtx->CS = pVCpu->cpum.GstCtx.cs.Sel;
7099 pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7100 }
7101 else
7102 *(uint64_t *)&pFpuCtx->FPUIP = pVCpu->cpum.GstCtx.rip;
7103}
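
/*
 * Illustrative sketch only: the real/V86-mode FPUIP encoding above, with
 * assumed register values.
 *
 * @code
 *      // CS = 0x1234, EIP = 0x0010:
 *      //   FPUIP = 0x0010 | (0x1234 << 4) = 0x12350  (real-mode linear address)
 *      //   The FPU CS field is left as 0 in this mode.
 * @endcode
 */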
7104
7105
7106/**
7107 * Updates the x87.DS and FPUDP registers.
7108 *
7109 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7110 * @param pFpuCtx The FPU context.
7111 * @param iEffSeg The effective segment register.
7112 * @param GCPtrEff The effective address relative to @a iEffSeg.
7113 */
7114DECLINLINE(void) iemFpuUpdateDP(PVMCPUCC pVCpu, PX86FXSTATE pFpuCtx, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7115{
7116 RTSEL sel;
7117 switch (iEffSeg)
7118 {
7119 case X86_SREG_DS: sel = pVCpu->cpum.GstCtx.ds.Sel; break;
7120 case X86_SREG_SS: sel = pVCpu->cpum.GstCtx.ss.Sel; break;
7121 case X86_SREG_CS: sel = pVCpu->cpum.GstCtx.cs.Sel; break;
7122 case X86_SREG_ES: sel = pVCpu->cpum.GstCtx.es.Sel; break;
7123 case X86_SREG_FS: sel = pVCpu->cpum.GstCtx.fs.Sel; break;
7124 case X86_SREG_GS: sel = pVCpu->cpum.GstCtx.gs.Sel; break;
7125 default:
7126 AssertMsgFailed(("%d\n", iEffSeg));
7127 sel = pVCpu->cpum.GstCtx.ds.Sel;
7128 }
7129 /** @todo pFpuCtx->DS and FPUDP need to be kept separately. */
7130 if (IEM_IS_REAL_OR_V86_MODE(pVCpu))
7131 {
7132 pFpuCtx->DS = 0;
7133 pFpuCtx->FPUDP = (uint32_t)GCPtrEff + ((uint32_t)sel << 4);
7134 }
7135 else if (!IEM_IS_LONG_MODE(pVCpu))
7136 {
7137 pFpuCtx->DS = sel;
7138 pFpuCtx->FPUDP = GCPtrEff;
7139 }
7140 else
7141 *(uint64_t *)&pFpuCtx->FPUDP = GCPtrEff;
7142}
7143
7144
7145/**
7146 * Rotates the stack registers in the push direction.
7147 *
7148 * @param pFpuCtx The FPU context.
7149 * @remarks This is a complete waste of time, but fxsave stores the registers in
7150 * stack order.
7151 */
7152DECLINLINE(void) iemFpuRotateStackPush(PX86FXSTATE pFpuCtx)
7153{
7154 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[7].r80;
7155 pFpuCtx->aRegs[7].r80 = pFpuCtx->aRegs[6].r80;
7156 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[5].r80;
7157 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[4].r80;
7158 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[3].r80;
7159 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[2].r80;
7160 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[1].r80;
7161 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[0].r80;
7162 pFpuCtx->aRegs[0].r80 = r80Tmp;
7163}
7164
7165
7166/**
7167 * Rotates the stack registers in the pop direction.
7168 *
7169 * @param pFpuCtx The FPU context.
7170 * @remarks This is a complete waste of time, but fxsave stores the registers in
7171 * stack order.
7172 */
7173DECLINLINE(void) iemFpuRotateStackPop(PX86FXSTATE pFpuCtx)
7174{
7175 RTFLOAT80U r80Tmp = pFpuCtx->aRegs[0].r80;
7176 pFpuCtx->aRegs[0].r80 = pFpuCtx->aRegs[1].r80;
7177 pFpuCtx->aRegs[1].r80 = pFpuCtx->aRegs[2].r80;
7178 pFpuCtx->aRegs[2].r80 = pFpuCtx->aRegs[3].r80;
7179 pFpuCtx->aRegs[3].r80 = pFpuCtx->aRegs[4].r80;
7180 pFpuCtx->aRegs[4].r80 = pFpuCtx->aRegs[5].r80;
7181 pFpuCtx->aRegs[5].r80 = pFpuCtx->aRegs[6].r80;
7182 pFpuCtx->aRegs[6].r80 = pFpuCtx->aRegs[7].r80;
7183 pFpuCtx->aRegs[7].r80 = r80Tmp;
7184}
7185
7186
7187/**
7188 * Updates FSW and pushes a FPU result onto the FPU stack if no pending
7189 * exception prevents it.
7190 *
7191 * @param pResult The FPU operation result to push.
7192 * @param pFpuCtx The FPU context.
7193 */
7194IEM_STATIC void iemFpuMaybePushResult(PIEMFPURESULT pResult, PX86FXSTATE pFpuCtx)
7195{
7196 /* Update FSW and bail if there are pending exceptions afterwards. */
7197 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7198 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7199 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7200 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7201 {
7202 pFpuCtx->FSW = fFsw;
7203 return;
7204 }
7205
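    /* A push decrements TOP modulo 8; adding 7 under the TOP mask is the
       same as subtracting 1. */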
7206 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7207 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7208 {
7209 /* All is fine, push the actual value. */
7210 pFpuCtx->FTW |= RT_BIT(iNewTop);
7211 pFpuCtx->aRegs[7].r80 = pResult->r80Result;
7212 }
7213 else if (pFpuCtx->FCW & X86_FCW_IM)
7214 {
7215 /* Masked stack overflow, push QNaN. */
7216 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7217 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7218 }
7219 else
7220 {
7221 /* Raise stack overflow, don't push anything. */
7222 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7223 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7224 return;
7225 }
7226
7227 fFsw &= ~X86_FSW_TOP_MASK;
7228 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7229 pFpuCtx->FSW = fFsw;
7230
7231 iemFpuRotateStackPush(pFpuCtx);
7232}
7233
7234
7235/**
7236 * Stores a result in a FPU register and updates the FSW and FTW.
7237 *
7238 * @param pFpuCtx The FPU context.
7239 * @param pResult The result to store.
7240 * @param iStReg Which FPU register to store it in.
7241 */
7242IEM_STATIC void iemFpuStoreResultOnly(PX86FXSTATE pFpuCtx, PIEMFPURESULT pResult, uint8_t iStReg)
7243{
7244 Assert(iStReg < 8);
7245 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7246 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7247 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_TOP_MASK;
7248 pFpuCtx->FTW |= RT_BIT(iReg);
7249 pFpuCtx->aRegs[iStReg].r80 = pResult->r80Result;
7250}
7251
7252
7253/**
7254 * Only updates the FPU status word (FSW) with the result of the current
7255 * instruction.
7256 *
7257 * @param pFpuCtx The FPU context.
7258 * @param u16FSW The FSW output of the current instruction.
7259 */
7260IEM_STATIC void iemFpuUpdateFSWOnly(PX86FXSTATE pFpuCtx, uint16_t u16FSW)
7261{
7262 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7263 pFpuCtx->FSW |= u16FSW & ~X86_FSW_TOP_MASK;
7264}
7265
7266
7267/**
7268 * Pops one item off the FPU stack if no pending exception prevents it.
7269 *
7270 * @param pFpuCtx The FPU context.
7271 */
7272IEM_STATIC void iemFpuMaybePopOne(PX86FXSTATE pFpuCtx)
7273{
7274 /* Check pending exceptions. */
7275 uint16_t uFSW = pFpuCtx->FSW;
7276 if ( (pFpuCtx->FSW & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7277 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7278 return;
7279
7280 /* Pop one item: increment TOP (adding 9 is +1 modulo 8). */
7281 uint16_t iOldTop = uFSW & X86_FSW_TOP_MASK;
7282 uFSW &= ~X86_FSW_TOP_MASK;
7283 uFSW |= (iOldTop + (UINT16_C(9) << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7284 pFpuCtx->FSW = uFSW;
7285
7286 /* Mark the previous ST0 as empty. */
7287 iOldTop >>= X86_FSW_TOP_SHIFT;
7288 pFpuCtx->FTW &= ~RT_BIT(iOldTop);
7289
7290 /* Rotate the registers. */
7291 iemFpuRotateStackPop(pFpuCtx);
7292}
7293
7294
7295/**
7296 * Pushes a FPU result onto the FPU stack if no pending exception prevents it.
7297 *
7298 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7299 * @param pResult The FPU operation result to push.
7300 */
7301IEM_STATIC void iemFpuPushResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult)
7302{
7303 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7304 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7305 iemFpuMaybePushResult(pResult, pFpuCtx);
7306}
7307
7308
7309/**
7310 * Pushes a FPU result onto the FPU stack if no pending exception prevents it,
7311 * and sets FPUDP and FPUDS.
7312 *
7313 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7314 * @param pResult The FPU operation result to push.
7315 * @param iEffSeg The effective segment register.
7316 * @param GCPtrEff The effective address relative to @a iEffSeg.
7317 */
7318IEM_STATIC void iemFpuPushResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7319{
7320 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7321 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7322 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7323 iemFpuMaybePushResult(pResult, pFpuCtx);
7324}
7325
7326
7327/**
7328 * Replaces ST0 with the first value and pushes the second onto the FPU stack,
7329 * unless a pending exception prevents it.
7330 *
7331 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7332 * @param pResult The FPU operation result to store and push.
7333 */
7334IEM_STATIC void iemFpuPushResultTwo(PVMCPUCC pVCpu, PIEMFPURESULTTWO pResult)
7335{
7336 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7337 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7338
7339 /* Update FSW and bail if there are pending exceptions afterwards. */
7340 uint16_t fFsw = pFpuCtx->FSW & ~X86_FSW_C_MASK;
7341 fFsw |= pResult->FSW & ~X86_FSW_TOP_MASK;
7342 if ( (fFsw & (X86_FSW_IE | X86_FSW_ZE | X86_FSW_DE))
7343 & ~(pFpuCtx->FCW & (X86_FCW_IM | X86_FCW_ZM | X86_FCW_DM)))
7344 {
7345 pFpuCtx->FSW = fFsw;
7346 return;
7347 }
7348
7349 uint16_t iNewTop = (X86_FSW_TOP_GET(fFsw) + 7) & X86_FSW_TOP_SMASK;
7350 if (!(pFpuCtx->FTW & RT_BIT(iNewTop)))
7351 {
7352 /* All is fine, push the actual value. */
7353 pFpuCtx->FTW |= RT_BIT(iNewTop);
7354 pFpuCtx->aRegs[0].r80 = pResult->r80Result1;
7355 pFpuCtx->aRegs[7].r80 = pResult->r80Result2;
7356 }
7357 else if (pFpuCtx->FCW & X86_FCW_IM)
7358 {
7359 /* Masked stack overflow, push QNaN. */
7360 fFsw |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1;
7361 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7362 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7363 }
7364 else
7365 {
7366 /* Raise stack overflow, don't push anything. */
7367 pFpuCtx->FSW |= pResult->FSW & ~X86_FSW_C_MASK;
7368 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_C1 | X86_FSW_B | X86_FSW_ES;
7369 return;
7370 }
7371
7372 fFsw &= ~X86_FSW_TOP_MASK;
7373 fFsw |= iNewTop << X86_FSW_TOP_SHIFT;
7374 pFpuCtx->FSW = fFsw;
7375
7376 iemFpuRotateStackPush(pFpuCtx);
7377}
7378
7379
7380/**
7381 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7382 * FOP.
7383 *
7384 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7385 * @param pResult The result to store.
7386 * @param iStReg Which FPU register to store it in.
7387 */
7388IEM_STATIC void iemFpuStoreResult(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7389{
7390 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7391 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7392 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7393}
7394
7395
7396/**
7397 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, and
7398 * FOP, and then pops the stack.
7399 *
7400 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7401 * @param pResult The result to store.
7402 * @param iStReg Which FPU register to store it in.
7403 */
7404IEM_STATIC void iemFpuStoreResultThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg)
7405{
7406 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7407 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7408 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7409 iemFpuMaybePopOne(pFpuCtx);
7410}
7411
7412
7413/**
7414 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7415 * FPUDP, and FPUDS.
7416 *
7417 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7418 * @param pResult The result to store.
7419 * @param iStReg Which FPU register to store it in.
7420 * @param iEffSeg The effective memory operand selector register.
7421 * @param GCPtrEff The effective memory operand offset.
7422 */
7423IEM_STATIC void iemFpuStoreResultWithMemOp(PVMCPUCC pVCpu, PIEMFPURESULT pResult, uint8_t iStReg,
7424 uint8_t iEffSeg, RTGCPTR GCPtrEff)
7425{
7426 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7427 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7428 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7429 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7430}
7431
7432
7433/**
7434 * Stores a result in a FPU register, updates the FSW, FTW, FPUIP, FPUCS, FOP,
7435 * FPUDP, and FPUDS, and then pops the stack.
7436 *
7437 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7438 * @param pResult The result to store.
7439 * @param iStReg Which FPU register to store it in.
7440 * @param iEffSeg The effective memory operand selector register.
7441 * @param GCPtrEff The effective memory operand offset.
7442 */
7443IEM_STATIC void iemFpuStoreResultWithMemOpThenPop(PVMCPUCC pVCpu, PIEMFPURESULT pResult,
7444 uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7445{
7446 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7447 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7448 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7449 iemFpuStoreResultOnly(pFpuCtx, pResult, iStReg);
7450 iemFpuMaybePopOne(pFpuCtx);
7451}
7452
7453
7454/**
7455 * Updates the FOP, FPUIP, and FPUCS. For FNOP.
7456 *
7457 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7458 */
7459IEM_STATIC void iemFpuUpdateOpcodeAndIp(PVMCPUCC pVCpu)
7460{
7461 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7462 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7463}
7464
7465
7466/**
7467 * Marks the specified stack register as free (for FFREE).
7468 *
7469 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7470 * @param iStReg The register to free.
7471 */
7472IEM_STATIC void iemFpuStackFree(PVMCPUCC pVCpu, uint8_t iStReg)
7473{
7474 Assert(iStReg < 8);
7475 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7476 uint8_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7477 pFpuCtx->FTW &= ~RT_BIT(iReg);
7478}
7479
7480
7481/**
7482 * Increments FSW.TOP, i.e. pops an item off the stack without freeing it.
7483 *
7484 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7485 */
7486IEM_STATIC void iemFpuStackIncTop(PVMCPUCC pVCpu)
7487{
7488 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7489 uint16_t uFsw = pFpuCtx->FSW;
7490 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7491 uTop = (uTop + (1 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7492 uFsw &= ~X86_FSW_TOP_MASK;
7493 uFsw |= uTop;
7494 pFpuCtx->FSW = uFsw;
7495}
7496
7497
7498/**
7499 * Decrements FSW.TOP, i.e. pushes an item onto the stack without storing anything.
7500 *
7501 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7502 */
7503IEM_STATIC void iemFpuStackDecTop(PVMCPUCC pVCpu)
7504{
7505 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7506 uint16_t uFsw = pFpuCtx->FSW;
7507 uint16_t uTop = uFsw & X86_FSW_TOP_MASK;
7508 uTop = (uTop + (7 << X86_FSW_TOP_SHIFT)) & X86_FSW_TOP_MASK;
7509 uFsw &= ~X86_FSW_TOP_MASK;
7510 uFsw |= uTop;
7511 pFpuCtx->FSW = uFsw;
7512}
7513
7514
7515/**
7516 * Updates the FSW, FOP, FPUIP, and FPUCS.
7517 *
7518 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7519 * @param u16FSW The FSW from the current instruction.
7520 */
7521IEM_STATIC void iemFpuUpdateFSW(PVMCPUCC pVCpu, uint16_t u16FSW)
7522{
7523 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7524 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7525 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7526}
7527
7528
7529/**
7530 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack.
7531 *
7532 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7533 * @param u16FSW The FSW from the current instruction.
7534 */
7535IEM_STATIC void iemFpuUpdateFSWThenPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7536{
7537 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7538 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7539 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7540 iemFpuMaybePopOne(pFpuCtx);
7541}
7542
7543
7544/**
7545 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS.
7546 *
7547 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7548 * @param u16FSW The FSW from the current instruction.
7549 * @param iEffSeg The effective memory operand selector register.
7550 * @param GCPtrEff The effective memory operand offset.
7551 */
7552IEM_STATIC void iemFpuUpdateFSWWithMemOp(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7553{
7554 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7555 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7556 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7557 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7558}
7559
7560
7561/**
7562 * Updates the FSW, FOP, FPUIP, and FPUCS, then pops the stack twice.
7563 *
7564 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7565 * @param u16FSW The FSW from the current instruction.
7566 */
7567IEM_STATIC void iemFpuUpdateFSWThenPopPop(PVMCPUCC pVCpu, uint16_t u16FSW)
7568{
7569 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7570 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7571 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7572 iemFpuMaybePopOne(pFpuCtx);
7573 iemFpuMaybePopOne(pFpuCtx);
7574}
7575
7576
7577/**
7578 * Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS, then pops the stack.
7579 *
7580 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7581 * @param u16FSW The FSW from the current instruction.
7582 * @param iEffSeg The effective memory operand selector register.
7583 * @param GCPtrEff The effective memory operand offset.
7584 */
7585IEM_STATIC void iemFpuUpdateFSWWithMemOpThenPop(PVMCPUCC pVCpu, uint16_t u16FSW, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7586{
7587 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7588 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7589 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7590 iemFpuUpdateFSWOnly(pFpuCtx, u16FSW);
7591 iemFpuMaybePopOne(pFpuCtx);
7592}
7593
7594
7595/**
7596 * Worker routine for raising an FPU stack underflow exception.
7597 *
7598 * @param pFpuCtx The FPU context.
7599 * @param iStReg The stack register being accessed.
7600 */
7601IEM_STATIC void iemFpuStackUnderflowOnly(PX86FXSTATE pFpuCtx, uint8_t iStReg)
7602{
7603 Assert(iStReg < 8 || iStReg == UINT8_MAX);
7604 if (pFpuCtx->FCW & X86_FCW_IM)
7605 {
7606 /* Masked underflow. */
7607 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7608 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7609 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7610 if (iStReg != UINT8_MAX)
7611 {
7612 pFpuCtx->FTW |= RT_BIT(iReg);
7613 iemFpuStoreQNan(&pFpuCtx->aRegs[iStReg].r80);
7614 }
7615 }
7616 else
7617 {
7618 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7619 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7620 }
7621}
7622
7623
7624/**
7625 * Raises a FPU stack underflow exception.
7626 *
7627 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7628 * @param iStReg The destination register that should be loaded
7629 * with QNaN if \#IS is not masked. Specify
7630 * UINT8_MAX if none (like for fcom).
7631 */
7632DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflow(PVMCPUCC pVCpu, uint8_t iStReg)
7633{
7634 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7635 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7636 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7637}
7638
7639
7640DECL_NO_INLINE(IEM_STATIC, void)
7641iemFpuStackUnderflowWithMemOp(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7642{
7643 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7644 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7645 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7646 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7647}
7648
7649
7650DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPop(PVMCPUCC pVCpu, uint8_t iStReg)
7651{
7652 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7653 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7654 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7655 iemFpuMaybePopOne(pFpuCtx);
7656}
7657
7658
7659DECL_NO_INLINE(IEM_STATIC, void)
7660iemFpuStackUnderflowWithMemOpThenPop(PVMCPUCC pVCpu, uint8_t iStReg, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7661{
7662 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7663 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7664 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7665 iemFpuStackUnderflowOnly(pFpuCtx, iStReg);
7666 iemFpuMaybePopOne(pFpuCtx);
7667}
7668
7669
7670DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackUnderflowThenPopPop(PVMCPUCC pVCpu)
7671{
7672 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7673 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7674 iemFpuStackUnderflowOnly(pFpuCtx, UINT8_MAX);
7675 iemFpuMaybePopOne(pFpuCtx);
7676 iemFpuMaybePopOne(pFpuCtx);
7677}
7678
7679
7680DECL_NO_INLINE(IEM_STATIC, void)
7681iemFpuStackPushUnderflow(PVMCPUCC pVCpu)
7682{
7683 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7684 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7685
7686 if (pFpuCtx->FCW & X86_FCW_IM)
7687 {
7688 /* Masked underflow - push QNaN. */
7689 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7690 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7691 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7692 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7693 pFpuCtx->FTW |= RT_BIT(iNewTop);
7694 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7695 iemFpuRotateStackPush(pFpuCtx);
7696 }
7697 else
7698 {
7699 /* Exception pending - don't change TOP or the register stack. */
7700 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7701 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7702 }
7703}
7704
7705
7706DECL_NO_INLINE(IEM_STATIC, void)
7707iemFpuStackPushUnderflowTwo(PVMCPUCC pVCpu)
7708{
7709 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7710 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7711
7712 if (pFpuCtx->FCW & X86_FCW_IM)
7713 {
7714 /* Masked underflow - Push QNaN. */
7715 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7716 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7717 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF;
7718 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7719 pFpuCtx->FTW |= RT_BIT(iNewTop);
7720 iemFpuStoreQNan(&pFpuCtx->aRegs[0].r80);
7721 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7722 iemFpuRotateStackPush(pFpuCtx);
7723 }
7724 else
7725 {
7726 /* Exception pending - don't change TOP or the register stack. */
7727 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7728 pFpuCtx->FSW |= X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7729 }
7730}
7731
7732
7733/**
7734 * Worker routine for raising an FPU stack overflow exception on a push.
7735 *
7736 * @param pFpuCtx The FPU context.
7737 */
7738IEM_STATIC void iemFpuStackPushOverflowOnly(PX86FXSTATE pFpuCtx)
7739{
7740 if (pFpuCtx->FCW & X86_FCW_IM)
7741 {
7742 /* Masked overflow. */
7743 uint16_t iNewTop = (X86_FSW_TOP_GET(pFpuCtx->FSW) + 7) & X86_FSW_TOP_SMASK;
7744 pFpuCtx->FSW &= ~(X86_FSW_TOP_MASK | X86_FSW_C_MASK);
7745 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF;
7746 pFpuCtx->FSW |= iNewTop << X86_FSW_TOP_SHIFT;
7747 pFpuCtx->FTW |= RT_BIT(iNewTop);
7748 iemFpuStoreQNan(&pFpuCtx->aRegs[7].r80);
7749 iemFpuRotateStackPush(pFpuCtx);
7750 }
7751 else
7752 {
7753 /* Exception pending - don't change TOP or the register stack. */
7754 pFpuCtx->FSW &= ~X86_FSW_C_MASK;
7755 pFpuCtx->FSW |= X86_FSW_C1 | X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B;
7756 }
7757}
7758
7759
7760/**
7761 * Raises an FPU stack overflow exception on a push.
7762 *
7763 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7764 */
7765DECL_NO_INLINE(IEM_STATIC, void) iemFpuStackPushOverflow(PVMCPUCC pVCpu)
7766{
7767 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7768 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7769 iemFpuStackPushOverflowOnly(pFpuCtx);
7770}
7771
7772
7773/**
7774 * Raises an FPU stack overflow exception on a push with a memory operand.
7775 *
7776 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7777 * @param iEffSeg The effective memory operand selector register.
7778 * @param GCPtrEff The effective memory operand offset.
7779 */
7780DECL_NO_INLINE(IEM_STATIC, void)
7781iemFpuStackPushOverflowWithMemOp(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEff)
7782{
7783 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7784 iemFpuUpdateDP(pVCpu, pFpuCtx, iEffSeg, GCPtrEff);
7785 iemFpuUpdateOpcodeAndIpWorker(pVCpu, pFpuCtx);
7786 iemFpuStackPushOverflowOnly(pFpuCtx);
7787}
7788
7789
7790IEM_STATIC int iemFpuStRegNotEmpty(PVMCPUCC pVCpu, uint8_t iStReg)
7791{
7792 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7793 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7794 if (pFpuCtx->FTW & RT_BIT(iReg))
7795 return VINF_SUCCESS;
7796 return VERR_NOT_FOUND;
7797}
7798
7799
7800IEM_STATIC int iemFpuStRegNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg, PCRTFLOAT80U *ppRef)
7801{
7802 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7803 uint16_t iReg = (X86_FSW_TOP_GET(pFpuCtx->FSW) + iStReg) & X86_FSW_TOP_SMASK;
7804 if (pFpuCtx->FTW & RT_BIT(iReg))
7805 {
7806 *ppRef = &pFpuCtx->aRegs[iStReg].r80;
7807 return VINF_SUCCESS;
7808 }
7809 return VERR_NOT_FOUND;
7810}
7811
7812
7813IEM_STATIC int iemFpu2StRegsNotEmptyRef(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0,
7814 uint8_t iStReg1, PCRTFLOAT80U *ppRef1)
7815{
7816 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7817 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7818 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7819 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7820 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7821 {
7822 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7823 *ppRef1 = &pFpuCtx->aRegs[iStReg1].r80;
7824 return VINF_SUCCESS;
7825 }
7826 return VERR_NOT_FOUND;
7827}
7828
7829
7830IEM_STATIC int iemFpu2StRegsNotEmptyRefFirst(PVMCPUCC pVCpu, uint8_t iStReg0, PCRTFLOAT80U *ppRef0, uint8_t iStReg1)
7831{
7832 PX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
7833 uint16_t iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7834 uint16_t iReg0 = (iTop + iStReg0) & X86_FSW_TOP_SMASK;
7835 uint16_t iReg1 = (iTop + iStReg1) & X86_FSW_TOP_SMASK;
7836 if ((pFpuCtx->FTW & (RT_BIT(iReg0) | RT_BIT(iReg1))) == (RT_BIT(iReg0) | RT_BIT(iReg1)))
7837 {
7838 *ppRef0 = &pFpuCtx->aRegs[iStReg0].r80;
7839 return VINF_SUCCESS;
7840 }
7841 return VERR_NOT_FOUND;
7842}
7843
7844
7845/**
7846 * Updates the FPU exception status after FCW is changed.
7847 *
7848 * @param pFpuCtx The FPU context.
7849 */
7850IEM_STATIC void iemFpuRecalcExceptionStatus(PX86FXSTATE pFpuCtx)
7851{
7852 uint16_t u16Fsw = pFpuCtx->FSW;
7853 if ((u16Fsw & X86_FSW_XCPT_MASK) & ~(pFpuCtx->FCW & X86_FCW_XCPT_MASK))
7854 u16Fsw |= X86_FSW_ES | X86_FSW_B;
7855 else
7856 u16Fsw &= ~(X86_FSW_ES | X86_FSW_B);
7857 pFpuCtx->FSW = u16Fsw;
7858}
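/*
 * Illustrative sketch (not part of the code above): how the summary bits
 * react to FCW changes.  A pending, unmasked IE sets ES (and B); masking it
 * again clears them.  A minimal check using the helper above; the function
 * name is invented for the example.
 */
#if 0 /* example only, not built */
static void exampleRecalcSummaryBits(void)
{
    X86FXSTATE FpuCtx;
    RT_ZERO(FpuCtx);
    FpuCtx.FSW = X86_FSW_IE;            /* pending invalid-operation exception */
    FpuCtx.FCW = 0;                     /* IM clear => exception unmasked */
    iemFpuRecalcExceptionStatus(&FpuCtx);
    Assert(FpuCtx.FSW & X86_FSW_ES);
    FpuCtx.FCW = X86_FCW_IM;            /* mask it again */
    iemFpuRecalcExceptionStatus(&FpuCtx);
    Assert(!(FpuCtx.FSW & X86_FSW_ES));
}
#endif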
7859
7860
7861/**
7862 * Calculates the full FTW (FPU tag word) for use in FNSTENV and FNSAVE.
7863 *
7864 * @returns The full FTW.
7865 * @param pFpuCtx The FPU context.
7866 */
7867IEM_STATIC uint16_t iemFpuCalcFullFtw(PCX86FXSTATE pFpuCtx)
7868{
7869 uint8_t const u8Ftw = (uint8_t)pFpuCtx->FTW;
7870 uint16_t u16Ftw = 0;
7871 unsigned const iTop = X86_FSW_TOP_GET(pFpuCtx->FSW);
7872 for (unsigned iSt = 0; iSt < 8; iSt++)
7873 {
7874 unsigned const iReg = (iSt + iTop) & 7;
7875 if (!(u8Ftw & RT_BIT(iReg)))
7876 u16Ftw |= 3 << (iReg * 2); /* empty */
7877 else
7878 {
7879 uint16_t uTag;
7880 PCRTFLOAT80U const pr80Reg = &pFpuCtx->aRegs[iSt].r80;
7881 if (pr80Reg->s.uExponent == 0x7fff)
7882 uTag = 2; /* Exponent is all 1's => Special. */
7883 else if (pr80Reg->s.uExponent == 0x0000)
7884 {
7885 if (pr80Reg->s.u64Mantissa == 0x0000)
7886 uTag = 1; /* All bits are zero => Zero. */
7887 else
7888 uTag = 2; /* Must be special. */
7889 }
7890 else if (pr80Reg->s.u64Mantissa & RT_BIT_64(63)) /* The J bit. */
7891 uTag = 0; /* Valid. */
7892 else
7893 uTag = 2; /* Must be special. */
7894
7895 u16Ftw |= uTag << (iReg * 2);
7896 }
7897 }
7898
7899 return u16Ftw;
7900}
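/*
 * Illustrative sketch (not part of the code above): the per-register tag
 * classification used by iemFpuCalcFullFtw, written out as a standalone
 * helper.  Tag encoding: 0=valid, 1=zero, 2=special (NaN, infinity,
 * denormal, unnormal), 3=empty.  The helper name is invented for the example.
 */
#if 0 /* example only, not built */
static uint16_t exampleCalcR80Tag(bool fEmpty, uint16_t uExponent, uint64_t u64Mantissa)
{
    if (fEmpty)
        return 3;                                        /* empty */
    if (uExponent == 0x7fff)
        return 2;                                        /* exponent all ones => special */
    if (uExponent == 0)
        return u64Mantissa == 0 ? 1 /* zero */ : 2;      /* (pseudo-)denormal => special */
    return u64Mantissa & RT_BIT_64(63) ? 0 /* valid */ : 2 /* unnormal => special */;
}
#endif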
7901
7902
7903/**
7904 * Converts a full FTW to a compressed one (for use in FLDENV and FRSTOR).
7905 *
7906 * @returns The compressed FTW.
7907 * @param u16FullFtw The full FTW to convert.
7908 */
7909IEM_STATIC uint16_t iemFpuCompressFtw(uint16_t u16FullFtw)
7910{
7911 uint8_t u8Ftw = 0;
7912 for (unsigned i = 0; i < 8; i++)
7913 {
7914 if ((u16FullFtw & 3) != 3 /*empty*/)
7915 u8Ftw |= RT_BIT(i);
7916 u16FullFtw >>= 2;
7917 }
7918
7919 return u8Ftw;
7920}
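/*
 * Illustrative usage sketch (not part of the code above): compressing a full
 * FTW.  Say physical register 0 is valid (tag 0) and register 3 holds zero
 * (tag 1) while everything else is empty (tag 3); only bits 0 and 3 survive
 * in the compressed form.  The function name is invented for the example.
 */
#if 0 /* example only, not built */
static void exampleCompressFtw(void)
{
    uint16_t const u16FullFtw = (uint16_t)(  (0 << 0)                          /* reg 0: valid */
                                           | (3 << 2) | (3 << 4)               /* regs 1+2: empty */
                                           | (1 << 6)                          /* reg 3: zero */
                                           | (3 << 8) | (3 << 10) | (3 << 12) | (3 << 14));
    Assert(iemFpuCompressFtw(u16FullFtw) == (RT_BIT(0) | RT_BIT(3)));
}
#endif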
7921
7922/** @} */
7923
7924
7925/** @name Memory access.
7926 *
7927 * @{
7928 */
7929
7930
7931/**
7932 * Updates the IEMCPU::cbWritten counter if applicable.
7933 *
7934 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7935 * @param fAccess The access being accounted for.
7936 * @param cbMem The access size.
7937 */
7938DECL_FORCE_INLINE(void) iemMemUpdateWrittenCounter(PVMCPUCC pVCpu, uint32_t fAccess, size_t cbMem)
7939{
7940 if ( (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_STACK | IEM_ACCESS_TYPE_WRITE)
7941 || (fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_WRITE)) == (IEM_ACCESS_WHAT_DATA | IEM_ACCESS_TYPE_WRITE) )
7942 pVCpu->iem.s.cbWritten += (uint32_t)cbMem;
7943}
7944
7945
7946/**
7947 * Checks if the given segment can be written to, raising the appropriate
7948 * exception if not.
7949 *
7950 * @returns VBox strict status code.
7951 *
7952 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7953 * @param pHid Pointer to the hidden register.
7954 * @param iSegReg The register number.
7955 * @param pu64BaseAddr Where to return the base address to use for the
7956 * segment. (In 64-bit code it may differ from the
7957 * base in the hidden segment.)
7958 */
7959IEM_STATIC VBOXSTRICTRC
7960iemMemSegCheckWriteAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
7961{
7962 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
7963
7964 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
7965 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
7966 else
7967 {
7968 if (!pHid->Attr.n.u1Present)
7969 {
7970 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
7971 AssertRelease(uSel == 0);
7972 Log(("iemMemSegCheckWriteAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
7973 return iemRaiseGeneralProtectionFault0(pVCpu);
7974 }
7975
7976 if ( ( (pHid->Attr.n.u4Type & X86_SEL_TYPE_CODE)
7977 || !(pHid->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
7978 && pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT )
7979 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
7980 *pu64BaseAddr = pHid->u64Base;
7981 }
7982 return VINF_SUCCESS;
7983}
7984
7985
7986/**
7987 * Checks if the given segment can be read from, raising the appropriate
7988 * exception if not.
7989 *
7990 * @returns VBox strict status code.
7991 *
7992 * @param pVCpu The cross context virtual CPU structure of the calling thread.
7993 * @param pHid Pointer to the hidden register.
7994 * @param iSegReg The register number.
7995 * @param pu64BaseAddr Where to return the base address to use for the
7996 * segment. (In 64-bit code it may differ from the
7997 * base in the hidden segment.)
7998 */
7999IEM_STATIC VBOXSTRICTRC
8000iemMemSegCheckReadAccessEx(PVMCPUCC pVCpu, PCCPUMSELREGHID pHid, uint8_t iSegReg, uint64_t *pu64BaseAddr)
8001{
8002 IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8003
8004 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
8005 *pu64BaseAddr = iSegReg < X86_SREG_FS ? 0 : pHid->u64Base;
8006 else
8007 {
8008 if (!pHid->Attr.n.u1Present)
8009 {
8010 uint16_t uSel = iemSRegFetchU16(pVCpu, iSegReg);
8011 AssertRelease(uSel == 0);
8012 Log(("iemMemSegCheckReadAccessEx: %#x (index %u) - bad selector -> #GP\n", uSel, iSegReg));
8013 return iemRaiseGeneralProtectionFault0(pVCpu);
8014 }
8015
8016 if ((pHid->Attr.n.u4Type & (X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ)) == X86_SEL_TYPE_CODE)
8017 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
8018 *pu64BaseAddr = pHid->u64Base;
8019 }
8020 return VINF_SUCCESS;
8021}
8022
8023
8024/**
8025 * Applies the segment limit, base and attributes.
8026 *
8027 * This may raise a \#GP or \#SS.
8028 *
8029 * @returns VBox strict status code.
8030 *
8031 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8032 * @param fAccess The kind of access which is being performed.
8033 * @param iSegReg The index of the segment register to apply.
8034 * This is UINT8_MAX if none (for IDT, GDT, LDT,
8035 * TSS, ++).
8036 * @param cbMem The access size.
8037 * @param pGCPtrMem Pointer to the guest memory address to apply
8038 * segmentation to. Input and output parameter.
8039 */
8040IEM_STATIC VBOXSTRICTRC
8041iemMemApplySegment(PVMCPUCC pVCpu, uint32_t fAccess, uint8_t iSegReg, size_t cbMem, PRTGCPTR pGCPtrMem)
8042{
8043 if (iSegReg == UINT8_MAX)
8044 return VINF_SUCCESS;
8045
8046 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
8047 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
8048 switch (pVCpu->iem.s.enmCpuMode)
8049 {
8050 case IEMMODE_16BIT:
8051 case IEMMODE_32BIT:
8052 {
8053 RTGCPTR32 GCPtrFirst32 = (RTGCPTR32)*pGCPtrMem;
8054 RTGCPTR32 GCPtrLast32 = GCPtrFirst32 + (uint32_t)cbMem - 1;
8055
8056 if ( pSel->Attr.n.u1Present
8057 && !pSel->Attr.n.u1Unusable)
8058 {
8059 Assert(pSel->Attr.n.u1DescType);
8060 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_CODE))
8061 {
8062 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8063 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_WRITE) )
8064 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8065
8066 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8067 {
8068 /** @todo CPL check. */
8069 }
8070
8071 /*
8072 * There are two kinds of data selectors, normal and expand down.
8073 */
8074 if (!(pSel->Attr.n.u4Type & X86_SEL_TYPE_DOWN))
8075 {
8076 if ( GCPtrFirst32 > pSel->u32Limit
8077 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8078 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8079 }
8080 else
8081 {
8082 /*
8083 * The upper boundary is defined by the B bit, not the G bit!
8084 */
8085 if ( GCPtrFirst32 < pSel->u32Limit + UINT32_C(1)
8086 || GCPtrLast32 > (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff)))
8087 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8088 }
8089 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8090 }
8091 else
8092 {
8093
8094 /*
8095 * A code selector can usually be used to read through; writing is
8096 * only permitted in real and V8086 mode.
8097 */
8098 if ( ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8099 || ( (fAccess & IEM_ACCESS_TYPE_READ)
8100 && !(pSel->Attr.n.u4Type & X86_SEL_TYPE_READ)) )
8101 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )
8102 return iemRaiseSelectorInvalidAccess(pVCpu, iSegReg, fAccess);
8103
8104 if ( GCPtrFirst32 > pSel->u32Limit
8105 || GCPtrLast32 > pSel->u32Limit) /* yes, in real mode too (since 80286). */
8106 return iemRaiseSelectorBounds(pVCpu, iSegReg, fAccess);
8107
8108 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu))
8109 {
8110 /** @todo CPL check. */
8111 }
8112
8113 *pGCPtrMem = GCPtrFirst32 += (uint32_t)pSel->u64Base;
8114 }
8115 }
8116 else
8117 return iemRaiseGeneralProtectionFault0(pVCpu);
8118 return VINF_SUCCESS;
8119 }
8120
8121 case IEMMODE_64BIT:
8122 {
8123 RTGCPTR GCPtrMem = *pGCPtrMem;
8124 if (iSegReg == X86_SREG_GS || iSegReg == X86_SREG_FS)
8125 *pGCPtrMem = GCPtrMem + pSel->u64Base;
8126
8127 Assert(cbMem >= 1);
8128 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
8129 return VINF_SUCCESS;
8130 /** @todo We should probably raise \#SS(0) here if segment is SS; see AMD spec.
8131 * 4.12.2 "Data Limit Checks in 64-bit Mode". */
8132 return iemRaiseGeneralProtectionFault0(pVCpu);
8133 }
8134
8135 default:
8136 AssertFailedReturn(VERR_IEM_IPE_7);
8137 }
8138}
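/*
 * Illustrative sketch (not part of the code above): the expand-down limit
 * check as a standalone predicate.  For an expand-down data segment the
 * valid offsets are limit+1 up to 0xffff (B=0) or 0xffffffff (B=1); the
 * helper name and parameters are invented for the example.
 */
#if 0 /* example only, not built */
static bool exampleExpandDownInBounds(uint32_t GCPtrFirst32, uint32_t GCPtrLast32,
                                      uint32_t u32Limit, bool fDefBig)
{
    return GCPtrFirst32 > u32Limit                                     /* at or below the limit is out of bounds */
        && GCPtrLast32 <= (fDefBig ? UINT32_MAX : UINT32_C(0xffff));   /* the upper bound comes from the B bit   */
}
/* E.g. limit=0x0fff, B=0: offsets 0x1000..0xffff are accessible. */
#endif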
8139
8140
8141/**
8142 * Translates a virtual address to a physical address and checks if we
8143 * can access the page as specified.
8144 *
8145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8146 * @param GCPtrMem The virtual address.
8147 * @param fAccess The intended access.
8148 * @param pGCPhysMem Where to return the physical address.
8149 */
8150IEM_STATIC VBOXSTRICTRC
8151iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t fAccess, PRTGCPHYS pGCPhysMem)
8152{
8153 /** @todo Need a different PGM interface here. We're currently using
8154 * generic / REM interfaces. This won't cut it for R0. */
8155 /** @todo If/when PGM handles paged real-mode, we can remove the hack in
8156 * iemSvmWorldSwitch/iemVmxWorldSwitch to work around raising a page-fault
8157 * here. */
8158 PGMPTWALK Walk;
8159 int rc = PGMGstGetPage(pVCpu, GCPtrMem, &Walk);
8160 if (RT_FAILURE(rc))
8161 {
8162 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - failed to fetch page -> #PF\n", GCPtrMem));
8163 /** @todo Check unassigned memory in unpaged mode. */
8164 /** @todo Reserved bits in page tables. Requires new PGM interface. */
8165 *pGCPhysMem = NIL_RTGCPHYS;
8166 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, rc);
8167 }
8168
8169 /* If the page is writable and does not have the no-exec bit set, all
8170 access is allowed. Otherwise we'll have to check more carefully... */
8171 if ((Walk.fEffective & (X86_PTE_RW | X86_PTE_US | X86_PTE_PAE_NX)) != (X86_PTE_RW | X86_PTE_US))
8172 {
8173 /* Write to read only memory? */
8174 if ( (fAccess & IEM_ACCESS_TYPE_WRITE)
8175 && !(Walk.fEffective & X86_PTE_RW)
8176 && ( ( pVCpu->iem.s.uCpl == 3
8177 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8178 || (pVCpu->cpum.GstCtx.cr0 & X86_CR0_WP)))
8179 {
8180 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - read-only page -> #PF\n", GCPtrMem));
8181 *pGCPhysMem = NIL_RTGCPHYS;
8182 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~IEM_ACCESS_TYPE_READ, VERR_ACCESS_DENIED);
8183 }
8184
8185 /* Kernel memory accessed by userland? */
8186 if ( !(Walk.fEffective & X86_PTE_US)
8187 && pVCpu->iem.s.uCpl == 3
8188 && !(fAccess & IEM_ACCESS_WHAT_SYS))
8189 {
8190 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - user access to kernel page -> #PF\n", GCPtrMem));
8191 *pGCPhysMem = NIL_RTGCPHYS;
8192 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess, VERR_ACCESS_DENIED);
8193 }
8194
8195 /* Executing non-executable memory? */
8196 if ( (fAccess & IEM_ACCESS_TYPE_EXEC)
8197 && (Walk.fEffective & X86_PTE_PAE_NX)
8198 && (pVCpu->cpum.GstCtx.msrEFER & MSR_K6_EFER_NXE) )
8199 {
8200 Log(("iemMemPageTranslateAndCheckAccess: GCPtrMem=%RGv - NX -> #PF\n", GCPtrMem));
8201 *pGCPhysMem = NIL_RTGCPHYS;
8202 return iemRaisePageFault(pVCpu, GCPtrMem, fAccess & ~(IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE),
8203 VERR_ACCESS_DENIED);
8204 }
8205 }
8206
8207 /*
8208 * Set the dirty / access flags.
8209 * ASSUMES this is set when the address is translated rather than on commit...
8210 */
8211 /** @todo testcase: check when A and D bits are actually set by the CPU. */
8212 uint32_t fAccessedDirty = fAccess & IEM_ACCESS_TYPE_WRITE ? X86_PTE_D | X86_PTE_A : X86_PTE_A;
8213 if ((Walk.fEffective & fAccessedDirty) != fAccessedDirty)
8214 {
8215 int rc2 = PGMGstModifyPage(pVCpu, GCPtrMem, 1, fAccessedDirty, ~(uint64_t)fAccessedDirty);
8216 AssertRC(rc2);
8217 }
8218
8219 RTGCPHYS const GCPhys = Walk.GCPhys | (GCPtrMem & PAGE_OFFSET_MASK);
8220 *pGCPhysMem = GCPhys;
8221 return VINF_SUCCESS;
8222}
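/*
 * Illustrative sketch (not part of the code above): the page level permission
 * checks above as a standalone predicate over the effective PTE bits.  The
 * fUser flag stands for "CPL 3 and not a system access"; all names are
 * invented for the example.
 */
#if 0 /* example only, not built */
static bool examplePageAccessAllowed(uint64_t fPteEffective, bool fWrite, bool fExec, bool fUser,
                                     bool fCr0Wp, bool fEferNxe)
{
    if (fWrite && !(fPteEffective & X86_PTE_RW) && (fUser || fCr0Wp))
        return false;   /* write to a read-only page */
    if (fUser && !(fPteEffective & X86_PTE_US))
        return false;   /* user-mode access to a supervisor page */
    if (fExec && (fPteEffective & X86_PTE_PAE_NX) && fEferNxe)
        return false;   /* instruction fetch from a no-execute page */
    return true;
}
#endif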
8223
8224
8225
8226/**
8227 * Maps a physical page.
8228 *
8229 * @returns VBox status code (see PGMR3PhysTlbGCPhys2Ptr).
8230 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8231 * @param GCPhysMem The physical address.
8232 * @param fAccess The intended access.
8233 * @param ppvMem Where to return the mapping address.
8234 * @param pLock The PGM lock.
8235 */
8236IEM_STATIC int iemMemPageMap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock)
8237{
8238#ifdef IEM_LOG_MEMORY_WRITES
8239 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8240 return VERR_PGM_PHYS_TLB_CATCH_ALL;
8241#endif
8242
8243 /** @todo This API may require some improving later. A private deal with PGM
8244 * regarding locking and unlocking needs to be struck. A couple of TLBs
8245 * living in PGM, but with publicly accessible inlined access methods
8246 * could perhaps be an even better solution. */
8247 int rc = PGMPhysIemGCPhys2Ptr(pVCpu->CTX_SUFF(pVM), pVCpu,
8248 GCPhysMem,
8249 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE),
8250 pVCpu->iem.s.fBypassHandlers,
8251 ppvMem,
8252 pLock);
8253 /*Log(("PGMPhysIemGCPhys2Ptr %Rrc pLock=%.*Rhxs\n", rc, sizeof(*pLock), pLock));*/
8254 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc));
8255
8256 return rc;
8257}
8258
8259
8260/**
8261 * Unmaps a page previously mapped by iemMemPageMap.
8262 *
8263 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8264 * @param GCPhysMem The physical address.
8265 * @param fAccess The intended access.
8266 * @param pvMem What iemMemPageMap returned.
8267 * @param pLock The PGM lock.
8268 */
8269DECLINLINE(void) iemMemPageUnmap(PVMCPUCC pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, const void *pvMem, PPGMPAGEMAPLOCK pLock)
8270{
8271 NOREF(pVCpu);
8272 NOREF(GCPhysMem);
8273 NOREF(fAccess);
8274 NOREF(pvMem);
8275 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), pLock);
8276}
8277
8278
8279/**
8280 * Looks up a memory mapping entry.
8281 *
8282 * @returns The mapping index (non-negative) or VERR_NOT_FOUND (negative).
8283 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8284 * @param pvMem The memory address.
8285 * @param fAccess The kind of access.
8286 */
8287DECLINLINE(int) iemMapLookup(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8288{
8289 Assert(pVCpu->iem.s.cActiveMappings <= RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8290 fAccess &= IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK;
8291 if ( pVCpu->iem.s.aMemMappings[0].pv == pvMem
8292 && (pVCpu->iem.s.aMemMappings[0].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8293 return 0;
8294 if ( pVCpu->iem.s.aMemMappings[1].pv == pvMem
8295 && (pVCpu->iem.s.aMemMappings[1].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8296 return 1;
8297 if ( pVCpu->iem.s.aMemMappings[2].pv == pvMem
8298 && (pVCpu->iem.s.aMemMappings[2].fAccess & (IEM_ACCESS_WHAT_MASK | IEM_ACCESS_TYPE_MASK)) == fAccess)
8299 return 2;
8300 return VERR_NOT_FOUND;
8301}
8302
8303
8304/**
8305 * Finds a free memmap entry when using iNextMapping doesn't work.
8306 *
8307 * @returns Memory mapping index, 1024 on failure.
8308 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8309 */
8310IEM_STATIC unsigned iemMemMapFindFree(PVMCPUCC pVCpu)
8311{
8312 /*
8313 * The easy case.
8314 */
8315 if (pVCpu->iem.s.cActiveMappings == 0)
8316 {
8317 pVCpu->iem.s.iNextMapping = 1;
8318 return 0;
8319 }
8320
8321 /* There should be enough mappings for all instructions. */
8322 AssertReturn(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings), 1024);
8323
8324 for (unsigned i = 0; i < RT_ELEMENTS(pVCpu->iem.s.aMemMappings); i++)
8325 if (pVCpu->iem.s.aMemMappings[i].fAccess == IEM_ACCESS_INVALID)
8326 return i;
8327
8328 AssertFailedReturn(1024);
8329}
8330
8331
8332/**
8333 * Commits a bounce buffer that needs writing back and unmaps it.
8334 *
8335 * @returns Strict VBox status code.
8336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8337 * @param iMemMap The index of the buffer to commit.
8338 * @param fPostponeFail Whether we can postpone writer failures to ring-3.
8339 * Always false in ring-3, obviously.
8340 */
8341IEM_STATIC VBOXSTRICTRC iemMemBounceBufferCommitAndUnmap(PVMCPUCC pVCpu, unsigned iMemMap, bool fPostponeFail)
8342{
8343 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
8344 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
8345#ifdef IN_RING3
8346 Assert(!fPostponeFail);
8347 RT_NOREF_PV(fPostponeFail);
8348#endif
8349
8350 /*
8351 * Do the writing.
8352 */
8353 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8354 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned)
8355 {
8356 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
8357 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8358 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8359 if (!pVCpu->iem.s.fBypassHandlers)
8360 {
8361 /*
8362 * Carefully and efficiently dealing with access handler return
8363 * codes makes this a little bloated.
8364 */
8365 VBOXSTRICTRC rcStrict = PGMPhysWrite(pVM,
8366 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8367 pbBuf,
8368 cbFirst,
8369 PGMACCESSORIGIN_IEM);
8370 if (rcStrict == VINF_SUCCESS)
8371 {
8372 if (cbSecond)
8373 {
8374 rcStrict = PGMPhysWrite(pVM,
8375 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8376 pbBuf + cbFirst,
8377 cbSecond,
8378 PGMACCESSORIGIN_IEM);
8379 if (rcStrict == VINF_SUCCESS)
8380 { /* nothing */ }
8381 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8382 {
8383 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc\n",
8384 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8385 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8386 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8387 }
8388#ifndef IN_RING3
8389 else if (fPostponeFail)
8390 {
8391 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8392 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8393 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8394 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8395 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8396 return iemSetPassUpStatus(pVCpu, rcStrict);
8397 }
8398#endif
8399 else
8400 {
8401 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8402 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8403 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8404 return rcStrict;
8405 }
8406 }
8407 }
8408 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8409 {
8410 if (!cbSecond)
8411 {
8412 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc\n",
8413 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8414 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8415 }
8416 else
8417 {
8418 VBOXSTRICTRC rcStrict2 = PGMPhysWrite(pVM,
8419 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8420 pbBuf + cbFirst,
8421 cbSecond,
8422 PGMACCESSORIGIN_IEM);
8423 if (rcStrict2 == VINF_SUCCESS)
8424 {
8425 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x\n",
8426 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8427 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8428 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8429 }
8430 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8431 {
8432 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc\n",
8433 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8434 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8435 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8436 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8437 }
8438#ifndef IN_RING3
8439 else if (fPostponeFail)
8440 {
8441 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8442 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8443 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8444 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_2ND;
8445 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8446 return iemSetPassUpStatus(pVCpu, rcStrict);
8447 }
8448#endif
8449 else
8450 {
8451 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8452 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8453 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict2) ));
8454 return rcStrict2;
8455 }
8456 }
8457 }
8458#ifndef IN_RING3
8459 else if (fPostponeFail)
8460 {
8461 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (postponed)\n",
8462 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8463 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8464 if (!cbSecond)
8465 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST;
8466 else
8467 pVCpu->iem.s.aMemMappings[iMemMap].fAccess |= IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND;
8468 VMCPU_FF_SET(pVCpu, VMCPU_FF_IEM);
8469 return iemSetPassUpStatus(pVCpu, rcStrict);
8470 }
8471#endif
8472 else
8473 {
8474 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysWrite GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8475 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, VBOXSTRICTRC_VAL(rcStrict),
8476 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8477 return rcStrict;
8478 }
8479 }
8480 else
8481 {
8482 /*
8483 * No access handlers, much simpler.
8484 */
8485 int rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pbBuf, cbFirst);
8486 if (RT_SUCCESS(rc))
8487 {
8488 if (cbSecond)
8489 {
8490 rc = PGMPhysSimpleWriteGCPhys(pVM, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pbBuf + cbFirst, cbSecond);
8491 if (RT_SUCCESS(rc))
8492 { /* likely */ }
8493 else
8494 {
8495 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x GCPhysSecond=%RGp/%#x %Rrc (!!)\n",
8496 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
8497 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond, rc));
8498 return rc;
8499 }
8500 }
8501 }
8502 else
8503 {
8504 Log(("iemMemBounceBufferCommitAndUnmap: PGMPhysSimpleWriteGCPhys GCPhysFirst=%RGp/%#x %Rrc [GCPhysSecond=%RGp/%#x] (!!)\n",
8505 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst, rc,
8506 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond));
8507 return rc;
8508 }
8509 }
8510 }
8511
8512#if defined(IEM_LOG_MEMORY_WRITES)
8513 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
8514 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0]));
8515 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond)
8516 Log(("IEM Wrote %RGp: %.*Rhxs [2nd page]\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
8517 RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond, 64),
8518 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst]));
8519
8520 size_t cbWrote = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst + pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
8521 g_cbIemWrote = cbWrote;
8522 memcpy(g_abIemWrote, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], RT_MIN(cbWrote, sizeof(g_abIemWrote)));
8523#endif
8524
8525 /*
8526 * Free the mapping entry.
8527 */
8528 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8529 Assert(pVCpu->iem.s.cActiveMappings != 0);
8530 pVCpu->iem.s.cActiveMappings--;
8531 return VINF_SUCCESS;
8532}
8533
8534
8535/**
8536 * iemMemMap worker that deals with a request crossing pages.
8537 */
8538IEM_STATIC VBOXSTRICTRC
8539iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess)
8540{
8541 /*
8542 * Do the address translations.
8543 */
8544 RTGCPHYS GCPhysFirst;
8545 VBOXSTRICTRC rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrFirst, fAccess, &GCPhysFirst);
8546 if (rcStrict != VINF_SUCCESS)
8547 return rcStrict;
8548
8549 RTGCPHYS GCPhysSecond;
8550 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, (GCPtrFirst + (cbMem - 1)) & ~(RTGCPTR)PAGE_OFFSET_MASK,
8551 fAccess, &GCPhysSecond);
8552 if (rcStrict != VINF_SUCCESS)
8553 return rcStrict;
8554 GCPhysSecond &= ~(RTGCPHYS)PAGE_OFFSET_MASK;
8555
8556 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
8557
8558 /*
8559 * Read in the current memory content if it's a read, execute or partial
8560 * write access.
8561 */
8562 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8563 uint32_t const cbFirstPage = PAGE_SIZE - (GCPhysFirst & PAGE_OFFSET_MASK);
8564 uint32_t const cbSecondPage = (uint32_t)(cbMem - cbFirstPage);
8565
8566 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8567 {
8568 if (!pVCpu->iem.s.fBypassHandlers)
8569 {
8570 /*
8571 * Must carefully deal with access handler status codes here, which
8572 * makes the code a bit bloated.
8573 */
8574 rcStrict = PGMPhysRead(pVM, GCPhysFirst, pbBuf, cbFirstPage, PGMACCESSORIGIN_IEM);
8575 if (rcStrict == VINF_SUCCESS)
8576 {
8577 rcStrict = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8578 if (rcStrict == VINF_SUCCESS)
8579 { /*likely */ }
8580 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8581 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8582 else
8583 {
8584 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (!!)\n",
8585 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict) ));
8586 return rcStrict;
8587 }
8588 }
8589 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8590 {
8591 VBOXSTRICTRC rcStrict2 = PGMPhysRead(pVM, GCPhysSecond, pbBuf + cbFirstPage, cbSecondPage, PGMACCESSORIGIN_IEM);
8592 if (PGM_PHYS_RW_IS_SUCCESS(rcStrict2))
8593 {
8594 PGM_PHYS_RW_DO_UPDATE_STRICT_RC(rcStrict, rcStrict2);
8595 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8596 }
8597 else
8598 {
8599 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysSecond=%RGp rcStrict2=%Rrc (rcStrict=%Rrc) (!!)\n",
8600 GCPhysSecond, VBOXSTRICTRC_VAL(rcStrict2), VBOXSTRICTRC_VAL(rcStrict) ));
8601 return rcStrict2;
8602 }
8603 }
8604 else
8605 {
8606 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8607 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8608 return rcStrict;
8609 }
8610 }
8611 else
8612 {
8613 /*
8614 * No informational status codes here, much more straightforward.
8615 */
8616 int rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf, GCPhysFirst, cbFirstPage);
8617 if (RT_SUCCESS(rc))
8618 {
8619 Assert(rc == VINF_SUCCESS);
8620 rc = PGMPhysSimpleReadGCPhys(pVM, pbBuf + cbFirstPage, GCPhysSecond, cbSecondPage);
8621 if (RT_SUCCESS(rc))
8622 Assert(rc == VINF_SUCCESS);
8623 else
8624 {
8625 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysSecond=%RGp rc=%Rrc (!!)\n", GCPhysSecond, rc));
8626 return rc;
8627 }
8628 }
8629 else
8630 {
8631 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rc=%Rrc (!!)\n", GCPhysFirst, rc));
8632 return rc;
8633 }
8634 }
8635 }
8636#ifdef VBOX_STRICT
8637 else
8638 memset(pbBuf, 0xcc, cbMem);
8639 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8640 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8641#endif
8642
8643 /*
8644 * Commit the bounce buffer entry.
8645 */
8646 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8647 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = GCPhysSecond;
8648 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbFirstPage;
8649 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = (uint16_t)cbSecondPage;
8650 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = false;
8651 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8652 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8653 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8654 pVCpu->iem.s.cActiveMappings++;
8655
8656 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8657 *ppvMem = pbBuf;
8658 return VINF_SUCCESS;
8659}
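/*
 * Illustrative sketch (not part of the code above): the page split arithmetic
 * with concrete numbers, assuming 4 KiB pages.  An 8 byte access starting at
 * page offset 0xffd spans two pages: 3 bytes in the first, 5 in the second.
 * The function name and the address are invented for the example.
 */
#if 0 /* example only, not built */
static void examplePageSplit(void)
{
    RTGCPHYS const GCPhysFirstEx  = UINT64_C(0x00123ffd);                       /* invented address */
    size_t   const cbMemEx        = 8;
    uint32_t const cbFirstPageEx  = PAGE_SIZE - (uint32_t)(GCPhysFirstEx & PAGE_OFFSET_MASK);
    uint32_t const cbSecondPageEx = (uint32_t)(cbMemEx - cbFirstPageEx);
    Assert(cbFirstPageEx == 3 && cbSecondPageEx == 5);
}
#endif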
8660
8661
8662/**
8663 * iemMemMap worker that deals with iemMemPageMap failures.
8664 */
8665IEM_STATIC VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, size_t cbMem,
8666 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap)
8667{
8668 /*
8669 * Filter out conditions we can handle and the ones which shouldn't happen.
8670 */
8671 if ( rcMap != VERR_PGM_PHYS_TLB_CATCH_WRITE
8672 && rcMap != VERR_PGM_PHYS_TLB_CATCH_ALL
8673 && rcMap != VERR_PGM_PHYS_TLB_UNASSIGNED)
8674 {
8675 AssertReturn(RT_FAILURE_NP(rcMap), VERR_IEM_IPE_8);
8676 return rcMap;
8677 }
8678 pVCpu->iem.s.cPotentialExits++;
8679
8680 /*
8681 * Read in the current memory content if it's a read, execute or partial
8682 * write access.
8683 */
8684 uint8_t *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
8685 if (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_PARTIAL_WRITE))
8686 {
8687 if (rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED)
8688 memset(pbBuf, 0xff, cbMem);
8689 else
8690 {
8691 int rc;
8692 if (!pVCpu->iem.s.fBypassHandlers)
8693 {
8694 VBOXSTRICTRC rcStrict = PGMPhysRead(pVCpu->CTX_SUFF(pVM), GCPhysFirst, pbBuf, cbMem, PGMACCESSORIGIN_IEM);
8695 if (rcStrict == VINF_SUCCESS)
8696 { /* nothing */ }
8697 else if (PGM_PHYS_RW_IS_SUCCESS(rcStrict))
8698 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict);
8699 else
8700 {
8701 Log(("iemMemBounceBufferMapPhys: PGMPhysRead GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8702 GCPhysFirst, VBOXSTRICTRC_VAL(rcStrict) ));
8703 return rcStrict;
8704 }
8705 }
8706 else
8707 {
8708 rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), pbBuf, GCPhysFirst, cbMem);
8709 if (RT_SUCCESS(rc))
8710 { /* likely */ }
8711 else
8712 {
8713 Log(("iemMemBounceBufferMapPhys: PGMPhysSimpleReadGCPhys GCPhysFirst=%RGp rcStrict=%Rrc (!!)\n",
8714 GCPhysFirst, rc));
8715 return rc;
8716 }
8717 }
8718 }
8719 }
8720#ifdef VBOX_STRICT
8721 else
8722 memset(pbBuf, 0xcc, cbMem);
8723#endif
8724#ifdef VBOX_STRICT
8725 if (cbMem < sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab))
8726 memset(pbBuf + cbMem, 0xaa, sizeof(pVCpu->iem.s.aBounceBuffers[iMemMap].ab) - cbMem);
8727#endif
8728
8729 /*
8730 * Commit the bounce buffer entry.
8731 */
8732 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst = GCPhysFirst;
8733 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond = NIL_RTGCPHYS;
8734 pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst = (uint16_t)cbMem;
8735 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond = 0;
8736 pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned = rcMap == VERR_PGM_PHYS_TLB_UNASSIGNED;
8737 pVCpu->iem.s.aMemMappings[iMemMap].pv = pbBuf;
8738 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess | IEM_ACCESS_BOUNCE_BUFFERED;
8739 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8740 pVCpu->iem.s.cActiveMappings++;
8741
8742 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8743 *ppvMem = pbBuf;
8744 return VINF_SUCCESS;
8745}
8746
8747
8748
8749/**
8750 * Maps the specified guest memory for the given kind of access.
8751 *
8752 * This may use bounce buffering of the memory if it's crossing a page
8753 * boundary or if there is an access handler installed for any of it. Because
8754 * of lock prefix guarantees, we're in for some extra clutter when this
8755 * happens.
8756 *
8757 * This may raise a \#GP, \#SS, \#PF or \#AC.
8758 *
8759 * @returns VBox strict status code.
8760 *
8761 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8762 * @param ppvMem Where to return the pointer to the mapped
8763 * memory.
8764 * @param cbMem The number of bytes to map. This is usually 1,
8765 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8766 * string operations it can be up to a page.
8767 * @param iSegReg The index of the segment register to use for
8768 * this access. The base and limits are checked.
8769 * Use UINT8_MAX to indicate that no segmentation
8770 * is required (for IDT, GDT and LDT accesses).
8771 * @param GCPtrMem The address of the guest memory.
8772 * @param fAccess How the memory is being accessed. The
8773 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8774 * how to map the memory, while the
8775 * IEM_ACCESS_WHAT_XXX bit is used when raising
8776 * exceptions.
8777 */
8778IEM_STATIC VBOXSTRICTRC
8779iemMemMap(PVMCPUCC pVCpu, void **ppvMem, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8780{
8781 /*
8782 * Check the input and figure out which mapping entry to use.
8783 */
8784 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 256 || cbMem == 108 || cbMem == 104 || cbMem == 102 || cbMem == 94); /* 512 is the max! */
8785 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8786 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8787
8788 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8789 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8790 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8791 {
8792 iMemMap = iemMemMapFindFree(pVCpu);
8793 AssertLogRelMsgReturn(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8794 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8795 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8796 pVCpu->iem.s.aMemMappings[2].fAccess),
8797 VERR_IEM_IPE_9);
8798 }
8799
8800 /*
8801 * Map the memory, checking that we can actually access it. If something
8802 * slightly complicated happens, fall back on bounce buffering.
8803 */
8804 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8805 if (rcStrict != VINF_SUCCESS)
8806 return rcStrict;
8807
8808 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem > PAGE_SIZE) /* Crossing a page boundary? */
8809 return iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, ppvMem, cbMem, GCPtrMem, fAccess);
8810
8811 RTGCPHYS GCPhysFirst;
8812 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8813 if (rcStrict != VINF_SUCCESS)
8814 return rcStrict;
8815
8816 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8817 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8818 if (fAccess & IEM_ACCESS_TYPE_READ)
8819 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8820
8821 void *pvMem;
8822 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8823 if (rcStrict != VINF_SUCCESS)
8824 return iemMemBounceBufferMapPhys(pVCpu, iMemMap, ppvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8825
8826 /*
8827 * Fill in the mapping table entry.
8828 */
8829 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8830 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8831 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8832 pVCpu->iem.s.cActiveMappings++;
8833
8834 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8835 *ppvMem = pvMem;
8836
8837 return VINF_SUCCESS;
8838}
8839
8840
8841/**
8842 * Commits the guest memory if bounce buffered and unmaps it.
8843 *
8844 * @returns Strict VBox status code.
8845 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8846 * @param pvMem The mapping.
8847 * @param fAccess The kind of access.
8848 */
8849IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8850{
8851 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8852 AssertReturn(iMemMap >= 0, iMemMap);
8853
8854 /* If it's bounce buffered, we may need to write back the buffer. */
8855 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8856 {
8857 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8858 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8859 }
8860 /* Otherwise unlock it. */
8861 else
8862 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8863
8864 /* Free the entry. */
8865 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
8866 Assert(pVCpu->iem.s.cActiveMappings != 0);
8867 pVCpu->iem.s.cActiveMappings--;
8868 return VINF_SUCCESS;
8869}
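/*
 * Illustrative usage sketch (not part of the code above): the map / modify /
 * commit-and-unmap pattern the data fetch and store helpers below are built
 * on.  Writing a 32-bit constant through DS is just an example; the function
 * name and the constant are invented.
 */
#if 0 /* example only, not built */
static VBOXSTRICTRC exampleStoreU32(PVMCPUCC pVCpu, RTGCPTR GCPtrMem)
{
    uint32_t    *pu32Dst;
    VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst),
                                      X86_SREG_DS, GCPtrMem, IEM_ACCESS_DATA_W);
    if (rcStrict == VINF_SUCCESS)
    {
        *pu32Dst = UINT32_C(0xdeadbeef);
        rcStrict = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
    }
    return rcStrict;
}
#endif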
8870
8871#ifdef IEM_WITH_SETJMP
8872
8873/**
8874 * Maps the specified guest memory for the given kind of access, longjmp on
8875 * error.
8876 *
8877 * This may use bounce buffering of the memory if it's crossing a page
8878 * boundary or if there is an access handler installed for any of it. Because
8879 * of lock prefix guarantees, we're in for some extra clutter when this
8880 * happens.
8881 *
8882 * This may raise a \#GP, \#SS, \#PF or \#AC.
8883 *
8884 * @returns Pointer to the mapped memory.
8885 *
8886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8887 * @param cbMem The number of bytes to map. This is usually 1,
8888 * 2, 4, 6, 8, 12, 16, 32 or 512. When used by
8889 * string operations it can be up to a page.
8890 * @param iSegReg The index of the segment register to use for
8891 * this access. The base and limits are checked.
8892 * Use UINT8_MAX to indicate that no segmentation
8893 * is required (for IDT, GDT and LDT accesses).
8894 * @param GCPtrMem The address of the guest memory.
8895 * @param fAccess How the memory is being accessed. The
8896 * IEM_ACCESS_TYPE_XXX bit is used to figure out
8897 * how to map the memory, while the
8898 * IEM_ACCESS_WHAT_XXX bit is used when raising
8899 * exceptions.
8900 */
8901IEM_STATIC void *iemMemMapJmp(PVMCPUCC pVCpu, size_t cbMem, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t fAccess)
8902{
8903 /*
8904 * Check the input and figure out which mapping entry to use.
8905 */
8906 Assert(cbMem <= 64 || cbMem == 512 || cbMem == 108 || cbMem == 104 || cbMem == 94); /* 512 is the max! */
8907 Assert(~(fAccess & ~(IEM_ACCESS_TYPE_MASK | IEM_ACCESS_WHAT_MASK)));
8908 Assert(pVCpu->iem.s.cActiveMappings < RT_ELEMENTS(pVCpu->iem.s.aMemMappings));
8909
8910 unsigned iMemMap = pVCpu->iem.s.iNextMapping;
8911 if ( iMemMap >= RT_ELEMENTS(pVCpu->iem.s.aMemMappings)
8912 || pVCpu->iem.s.aMemMappings[iMemMap].fAccess != IEM_ACCESS_INVALID)
8913 {
8914 iMemMap = iemMemMapFindFree(pVCpu);
8915 AssertLogRelMsgStmt(iMemMap < RT_ELEMENTS(pVCpu->iem.s.aMemMappings),
8916 ("active=%d fAccess[0] = {%#x, %#x, %#x}\n", pVCpu->iem.s.cActiveMappings,
8917 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess,
8918 pVCpu->iem.s.aMemMappings[2].fAccess),
8919 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_9));
8920 }
8921
8922 /*
8923 * Map the memory, checking that we can actually access it. If something
8924 * slightly complicated happens, fall back on bounce buffering.
8925 */
8926 VBOXSTRICTRC rcStrict = iemMemApplySegment(pVCpu, fAccess, iSegReg, cbMem, &GCPtrMem);
8927 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8928 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8929
8930 /* Crossing a page boundary? */
8931 if ((GCPtrMem & PAGE_OFFSET_MASK) + cbMem <= PAGE_SIZE)
8932 { /* No (likely). */ }
8933 else
8934 {
8935 void *pvMem;
8936 rcStrict = iemMemBounceBufferMapCrossPage(pVCpu, iMemMap, &pvMem, cbMem, GCPtrMem, fAccess);
8937 if (rcStrict == VINF_SUCCESS)
8938 return pvMem;
8939 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8940 }
8941
8942 RTGCPHYS GCPhysFirst;
8943 rcStrict = iemMemPageTranslateAndCheckAccess(pVCpu, GCPtrMem, fAccess, &GCPhysFirst);
8944 if (rcStrict == VINF_SUCCESS) { /*likely*/ }
8945 else longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8946
8947 if (fAccess & IEM_ACCESS_TYPE_WRITE)
8948 Log8(("IEM WR %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8949 if (fAccess & IEM_ACCESS_TYPE_READ)
8950 Log9(("IEM RD %RGv (%RGp) LB %#zx\n", GCPtrMem, GCPhysFirst, cbMem));
8951
8952 void *pvMem;
8953 rcStrict = iemMemPageMap(pVCpu, GCPhysFirst, fAccess, &pvMem, &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
8954 if (rcStrict == VINF_SUCCESS)
8955 { /* likely */ }
8956 else
8957 {
8958 rcStrict = iemMemBounceBufferMapPhys(pVCpu, iMemMap, &pvMem, cbMem, GCPhysFirst, fAccess, rcStrict);
8959 if (rcStrict == VINF_SUCCESS)
8960 return pvMem;
8961 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8962 }
8963
8964 /*
8965 * Fill in the mapping table entry.
8966 */
8967 pVCpu->iem.s.aMemMappings[iMemMap].pv = pvMem;
8968 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = fAccess;
8969 pVCpu->iem.s.iNextMapping = iMemMap + 1;
8970 pVCpu->iem.s.cActiveMappings++;
8971
8972 iemMemUpdateWrittenCounter(pVCpu, fAccess, cbMem);
8973 return pvMem;
8974}
8975
8976
8977/**
8978 * Commits the guest memory if bounce buffered and unmaps it, longjmp on error.
8979 *
8980 * @param pVCpu The cross context virtual CPU structure of the calling thread.
8981 * @param pvMem The mapping.
8982 * @param fAccess The kind of access.
8983 */
8984IEM_STATIC void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
8985{
8986 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
8987 AssertStmt(iMemMap >= 0, longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), iMemMap));
8988
8989 /* If it's bounce buffered, we may need to write back the buffer. */
8990 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
8991 {
8992 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
8993 {
8994 VBOXSTRICTRC rcStrict = iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, false /*fPostponeFail*/);
8995 if (rcStrict == VINF_SUCCESS)
8996 return;
8997 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
8998 }
8999 }
9000 /* Otherwise unlock it. */
9001 else
9002 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9003
9004 /* Free the entry. */
9005 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9006 Assert(pVCpu->iem.s.cActiveMappings != 0);
9007 pVCpu->iem.s.cActiveMappings--;
9008}
9009
9010#endif /* IEM_WITH_SETJMP */
9011
9012#ifndef IN_RING3
9013/**
9014 * Commits the guest memory if bounce buffered and unmaps it; if any bounce
9015 * buffer part shows trouble, the write will be postponed to ring-3 (sets FF and stuff).
9016 *
9017 * Allows the instruction to be completed and retired, while the IEM user will
9018 * return to ring-3 immediately afterwards and do the postponed writes there.
9019 *
9020 * @returns VBox status code (no strict statuses). Caller must check
9021 * VMCPU_FF_IEM before repeating string instructions and similar stuff.
9022 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9023 * @param pvMem The mapping.
9024 * @param fAccess The kind of access.
9025 */
9026IEM_STATIC VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, void *pvMem, uint32_t fAccess)
9027{
9028 int iMemMap = iemMapLookup(pVCpu, pvMem, fAccess);
9029 AssertReturn(iMemMap >= 0, iMemMap);
9030
9031 /* If it's bounce buffered, we may need to write back the buffer. */
9032 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED)
9033 {
9034 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE)
9035 return iemMemBounceBufferCommitAndUnmap(pVCpu, iMemMap, true /*fPostponeFail*/);
9036 }
9037 /* Otherwise unlock it. */
9038 else
9039 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9040
9041 /* Free the entry. */
9042 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9043 Assert(pVCpu->iem.s.cActiveMappings != 0);
9044 pVCpu->iem.s.cActiveMappings--;
9045 return VINF_SUCCESS;
9046}
9047#endif
9048
9049
9050/**
9051 * Rolls back mappings, releasing page locks and such.
9052 *
9053 * The caller shall only call this after checking cActiveMappings.
9054 *
9056 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9057 */
9058IEM_STATIC void iemMemRollback(PVMCPUCC pVCpu)
9059{
9060 Assert(pVCpu->iem.s.cActiveMappings > 0);
9061
9062 uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
9063 while (iMemMap-- > 0)
9064 {
9065 uint32_t const fAccess = pVCpu->iem.s.aMemMappings[iMemMap].fAccess;
9066 if (fAccess != IEM_ACCESS_INVALID)
9067 {
9068 AssertMsg(!(fAccess & ~IEM_ACCESS_VALID_MASK) && fAccess != 0, ("%#x\n", fAccess));
9069 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
9070 if (!(fAccess & IEM_ACCESS_BOUNCE_BUFFERED))
9071 PGMPhysReleasePageMappingLock(pVCpu->CTX_SUFF(pVM), &pVCpu->iem.s.aMemMappingLocks[iMemMap].Lock);
9072 AssertMsg(pVCpu->iem.s.cActiveMappings > 0,
9073 ("iMemMap=%u fAccess=%#x pv=%p GCPhysFirst=%RGp GCPhysSecond=%RGp\n",
9074 iMemMap, fAccess, pVCpu->iem.s.aMemMappings[iMemMap].pv,
9075 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond));
9076 pVCpu->iem.s.cActiveMappings--;
9077 }
9078 }
9079}
9080
9081
9082/**
9083 * Fetches a data byte.
9084 *
9085 * @returns Strict VBox status code.
9086 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9087 * @param pu8Dst Where to return the byte.
9088 * @param iSegReg The index of the segment register to use for
9089 * this access. The base and limits are checked.
9090 * @param GCPtrMem The address of the guest memory.
9091 */
9092IEM_STATIC VBOXSTRICTRC iemMemFetchDataU8(PVMCPUCC pVCpu, uint8_t *pu8Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9093{
9094 /* The lazy approach for now... */
9095 uint8_t const *pu8Src;
9096 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Src, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9097 if (rc == VINF_SUCCESS)
9098 {
9099 *pu8Dst = *pu8Src;
9100 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9101 }
9102 return rc;
9103}
9104
9105
9106#ifdef IEM_WITH_SETJMP
9107/**
9108 * Fetches a data byte, longjmp on error.
9109 *
9110 * @returns The byte.
9111 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9112 * @param iSegReg The index of the segment register to use for
9113 * this access. The base and limits are checked.
9114 * @param GCPtrMem The address of the guest memory.
9115 */
9116DECL_NO_INLINE(IEM_STATIC, uint8_t) iemMemFetchDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9117{
9118 /* The lazy approach for now... */
9119 uint8_t const *pu8Src = (uint8_t const *)iemMemMapJmp(pVCpu, sizeof(*pu8Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9120 uint8_t const bRet = *pu8Src;
9121 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu8Src, IEM_ACCESS_DATA_R);
9122 return bRet;
9123}
9124#endif /* IEM_WITH_SETJMP */
9125
9126
9127/**
9128 * Fetches a data word.
9129 *
9130 * @returns Strict VBox status code.
9131 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9132 * @param pu16Dst Where to return the word.
9133 * @param iSegReg The index of the segment register to use for
9134 * this access. The base and limits are checked.
9135 * @param GCPtrMem The address of the guest memory.
9136 */
9137IEM_STATIC VBOXSTRICTRC iemMemFetchDataU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9138{
9139 /* The lazy approach for now... */
9140 uint16_t const *pu16Src;
9141 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9142 if (rc == VINF_SUCCESS)
9143 {
9144 *pu16Dst = *pu16Src;
9145 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9146 }
9147 return rc;
9148}
9149
9150
9151#ifdef IEM_WITH_SETJMP
9152/**
9153 * Fetches a data word, longjmp on error.
9154 *
9155 * @returns The word.
9156 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9157 * @param iSegReg The index of the segment register to use for
9158 * this access. The base and limits are checked.
9159 * @param GCPtrMem The address of the guest memory.
9160 */
9161DECL_NO_INLINE(IEM_STATIC, uint16_t) iemMemFetchDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9162{
9163 /* The lazy approach for now... */
9164 uint16_t const *pu16Src = (uint16_t const *)iemMemMapJmp(pVCpu, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9165 uint16_t const u16Ret = *pu16Src;
9166 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu16Src, IEM_ACCESS_DATA_R);
9167 return u16Ret;
9168}
9169#endif
9170
9171
9172/**
9173 * Fetches a data dword.
9174 *
9175 * @returns Strict VBox status code.
9176 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9177 * @param pu32Dst Where to return the dword.
9178 * @param iSegReg The index of the segment register to use for
9179 * this access. The base and limits are checked.
9180 * @param GCPtrMem The address of the guest memory.
9181 */
9182IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9183{
9184 /* The lazy approach for now... */
9185 uint32_t const *pu32Src;
9186 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9187 if (rc == VINF_SUCCESS)
9188 {
9189 *pu32Dst = *pu32Src;
9190 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9191 }
9192 return rc;
9193}
9194
9195
9196/**
9197 * Fetches a data dword and zero extends it to a qword.
9198 *
9199 * @returns Strict VBox status code.
9200 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9201 * @param pu64Dst Where to return the qword.
9202 * @param iSegReg The index of the segment register to use for
9203 * this access. The base and limits are checked.
9204 * @param GCPtrMem The address of the guest memory.
9205 */
9206IEM_STATIC VBOXSTRICTRC iemMemFetchDataU32_ZX_U64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9207{
9208 /* The lazy approach for now... */
9209 uint32_t const *pu32Src;
9210 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9211 if (rc == VINF_SUCCESS)
9212 {
9213 *pu64Dst = *pu32Src;
9214 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9215 }
9216 return rc;
9217}
9218
9219
9220#ifdef IEM_WITH_SETJMP
9221
9222IEM_STATIC RTGCPTR iemMemApplySegmentToReadJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9223{
9224 Assert(cbMem >= 1);
9225 Assert(iSegReg < X86_SREG_COUNT);
9226
9227 /*
9228 * 64-bit mode is simpler.
9229 */
9230 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9231 {
9232 if (iSegReg >= X86_SREG_FS)
9233 {
9234 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9235 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9236 GCPtrMem += pSel->u64Base;
9237 }
9238
9239 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9240 return GCPtrMem;
9241 }
9242 /*
9243 * 16-bit and 32-bit segmentation.
9244 */
9245 else
9246 {
9247 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9248 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9249 if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9250 == X86DESCATTR_P /* data, expand up */
9251 || (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ))
9252 == (X86DESCATTR_P | X86_SEL_TYPE_CODE | X86_SEL_TYPE_READ) /* code, read-only */ )
9253 {
9254 /* expand up */
9255 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9256 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9257 && GCPtrLast32 > (uint32_t)GCPtrMem))
9258 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9259 }
9260 else if ( (pSel->Attr.u & (X86DESCATTR_P | X86DESCATTR_UNUSABLE | X86_SEL_TYPE_CODE | X86_SEL_TYPE_DOWN))
9261 == (X86DESCATTR_P | X86_SEL_TYPE_DOWN) /* data, expand down */ )
9262 {
9263 /* expand down */
9264 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9265 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9266 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9267 && GCPtrLast32 > (uint32_t)GCPtrMem))
9268 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9269 }
9270 else
9271 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9272 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_R);
9273 }
9274 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9275}
9276
9277
9278IEM_STATIC RTGCPTR iemMemApplySegmentToWriteJmp(PVMCPUCC pVCpu, uint8_t iSegReg, size_t cbMem, RTGCPTR GCPtrMem)
9279{
9280 Assert(cbMem >= 1);
9281 Assert(iSegReg < X86_SREG_COUNT);
9282
9283 /*
9284 * 64-bit mode is simpler.
9285 */
9286 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9287 {
9288 if (iSegReg >= X86_SREG_FS)
9289 {
9290 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9291 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9292 GCPtrMem += pSel->u64Base;
9293 }
9294
9295 if (RT_LIKELY(X86_IS_CANONICAL(GCPtrMem) && X86_IS_CANONICAL(GCPtrMem + cbMem - 1)))
9296 return GCPtrMem;
9297 }
9298 /*
9299 * 16-bit and 32-bit segmentation.
9300 */
9301 else
9302 {
9303 IEM_CTX_IMPORT_JMP(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(iSegReg));
9304 PCPUMSELREGHID pSel = iemSRegGetHid(pVCpu, iSegReg);
9305 uint32_t const fRelevantAttrs = pSel->Attr.u & ( X86DESCATTR_P | X86DESCATTR_UNUSABLE
9306 | X86_SEL_TYPE_CODE | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN);
9307 if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE)) /* data, expand up */
9308 {
9309 /* expand up */
9310 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9311 if (RT_LIKELY( GCPtrLast32 > pSel->u32Limit
9312 && GCPtrLast32 > (uint32_t)GCPtrMem))
9313 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9314 }
9315 else if (fRelevantAttrs == (X86DESCATTR_P | X86_SEL_TYPE_WRITE | X86_SEL_TYPE_DOWN)) /* data, expand down */
9316 {
9317 /* expand down */
9318 uint32_t GCPtrLast32 = (uint32_t)GCPtrMem + (uint32_t)cbMem;
9319 if (RT_LIKELY( (uint32_t)GCPtrMem > pSel->u32Limit
9320 && GCPtrLast32 <= (pSel->Attr.n.u1DefBig ? UINT32_MAX : UINT32_C(0xffff))
9321 && GCPtrLast32 > (uint32_t)GCPtrMem))
9322 return (uint32_t)GCPtrMem + (uint32_t)pSel->u64Base;
9323 }
9324 else
9325 iemRaiseSelectorInvalidAccessJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9326 iemRaiseSelectorBoundsJmp(pVCpu, iSegReg, IEM_ACCESS_DATA_W);
9327 }
9328 iemRaiseGeneralProtectionFault0Jmp(pVCpu);
9329}
9330
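/*
 * Illustrative sketch (added for exposition, not part of the original source):
 * apart from adding the FS/GS base, the 64-bit branches of the two helpers
 * above only require that the first and the last byte of the access are
 * canonical.  A few concrete examples, assuming the usual 48-bit
 * implementation of X86_IS_CANONICAL:
 */
#if 0
static void iemSketchCanonicalExamples(void)
{
    Assert( X86_IS_CANONICAL(UINT64_C(0x00007fffffffffff))); /* top of the lower canonical half */
    Assert( X86_IS_CANONICAL(UINT64_C(0xffff800000000000))); /* bottom of the upper canonical half */
    Assert(!X86_IS_CANONICAL(UINT64_C(0x0000800000000000))); /* first address inside the non-canonical hole */
}
#endif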
9331
9332/**
9333 * Fetches a data dword, longjmp on error, fallback/safe version.
9334 *
9335 * @returns The dword
9336 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9337 * @param iSegReg The index of the segment register to use for
9338 * this access. The base and limits are checked.
9339 * @param GCPtrMem The address of the guest memory.
9340 */
9341IEM_STATIC uint32_t iemMemFetchDataU32SafeJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9342{
9343 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9344 uint32_t const u32Ret = *pu32Src;
9345 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9346 return u32Ret;
9347}
9348
9349
9350/**
9351 * Fetches a data dword, longjmp on error.
9352 *
9353 * @returns The dword
9354 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9355 * @param iSegReg The index of the segment register to use for
9356 * this access. The base and limits are checked.
9357 * @param GCPtrMem The address of the guest memory.
9358 */
9359DECL_NO_INLINE(IEM_STATIC, uint32_t) iemMemFetchDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9360{
9361# ifdef IEM_WITH_DATA_TLB
9362 RTGCPTR GCPtrEff = iemMemApplySegmentToReadJmp(pVCpu, iSegReg, sizeof(uint32_t), GCPtrMem);
9363 if (RT_LIKELY((GCPtrEff & X86_PAGE_OFFSET_MASK) <= X86_PAGE_SIZE - sizeof(uint32_t)))
9364 {
9365 /// @todo more later.
9366 }
9367
9368 return iemMemFetchDataU32SafeJmp(pVCpu, iSegReg, GCPtrMem);
9369# else
9370 /* The lazy approach. */
9371 uint32_t const *pu32Src = (uint32_t const *)iemMemMapJmp(pVCpu, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9372 uint32_t const u32Ret = *pu32Src;
9373 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu32Src, IEM_ACCESS_DATA_R);
9374 return u32Ret;
9375# endif
9376}
9377#endif
9378
9379
9380#ifdef SOME_UNUSED_FUNCTION
9381/**
9382 * Fetches a data dword and sign extends it to a qword.
9383 *
9384 * @returns Strict VBox status code.
9385 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9386 * @param pu64Dst Where to return the sign extended value.
9387 * @param iSegReg The index of the segment register to use for
9388 * this access. The base and limits are checked.
9389 * @param GCPtrMem The address of the guest memory.
9390 */
9391IEM_STATIC VBOXSTRICTRC iemMemFetchDataS32SxU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9392{
9393 /* The lazy approach for now... */
9394 int32_t const *pi32Src;
9395 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pi32Src, sizeof(*pi32Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9396 if (rc == VINF_SUCCESS)
9397 {
9398 *pu64Dst = *pi32Src;
9399 rc = iemMemCommitAndUnmap(pVCpu, (void *)pi32Src, IEM_ACCESS_DATA_R);
9400 }
9401#ifdef __GNUC__ /* warning: GCC may be a royal pain */
9402 else
9403 *pu64Dst = 0;
9404#endif
9405 return rc;
9406}
9407#endif
9408
9409
9410/**
9411 * Fetches a data qword.
9412 *
9413 * @returns Strict VBox status code.
9414 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9415 * @param pu64Dst Where to return the qword.
9416 * @param iSegReg The index of the segment register to use for
9417 * this access. The base and limits are checked.
9418 * @param GCPtrMem The address of the guest memory.
9419 */
9420IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9421{
9422 /* The lazy approach for now... */
9423 uint64_t const *pu64Src;
9424 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9425 if (rc == VINF_SUCCESS)
9426 {
9427 *pu64Dst = *pu64Src;
9428 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9429 }
9430 return rc;
9431}
9432
9433
9434#ifdef IEM_WITH_SETJMP
9435/**
9436 * Fetches a data qword, longjmp on error.
9437 *
9438 * @returns The qword.
9439 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9440 * @param iSegReg The index of the segment register to use for
9441 * this access. The base and limits are checked.
9442 * @param GCPtrMem The address of the guest memory.
9443 */
9444DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9445{
9446 /* The lazy approach for now... */
9447 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9448 uint64_t const u64Ret = *pu64Src;
9449 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9450 return u64Ret;
9451}
9452#endif
9453
9454
9455/**
9456 * Fetches a data qword, aligned at a 16 byte boundary (for SSE).
9457 *
9458 * @returns Strict VBox status code.
9459 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9460 * @param pu64Dst Where to return the qword.
9461 * @param iSegReg The index of the segment register to use for
9462 * this access. The base and limits are checked.
9463 * @param GCPtrMem The address of the guest memory.
9464 */
9465IEM_STATIC VBOXSTRICTRC iemMemFetchDataU64AlignedU128(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9466{
9467 /* The lazy approach for now... */
9468 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9469 if (RT_UNLIKELY(GCPtrMem & 15))
9470 return iemRaiseGeneralProtectionFault0(pVCpu);
9471
9472 uint64_t const *pu64Src;
9473 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9474 if (rc == VINF_SUCCESS)
9475 {
9476 *pu64Dst = *pu64Src;
9477 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9478 }
9479 return rc;
9480}
9481
9482
9483#ifdef IEM_WITH_SETJMP
9484/**
9485 * Fetches a data qword, aligned at a 16 byte boundary (for SSE), longjmp on error.
9486 *
9487 * @returns The qword.
9488 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9489 * @param iSegReg The index of the segment register to use for
9490 * this access. The base and limits are checked.
9491 * @param GCPtrMem The address of the guest memory.
9492 */
9493DECL_NO_INLINE(IEM_STATIC, uint64_t) iemMemFetchDataU64AlignedU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem)
9494{
9495 /* The lazy approach for now... */
9496 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9497 if (RT_LIKELY(!(GCPtrMem & 15)))
9498 {
9499 uint64_t const *pu64Src = (uint64_t const *)iemMemMapJmp(pVCpu, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9500 uint64_t const u64Ret = *pu64Src;
9501 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu64Src, IEM_ACCESS_DATA_R);
9502 return u64Ret;
9503 }
9504
9505 VBOXSTRICTRC rc = iemRaiseGeneralProtectionFault0(pVCpu);
9506 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rc));
9507}
9508#endif
9509
9510
9511/**
9512 * Fetches a data tword.
9513 *
9514 * @returns Strict VBox status code.
9515 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9516 * @param pr80Dst Where to return the tword.
9517 * @param iSegReg The index of the segment register to use for
9518 * this access. The base and limits are checked.
9519 * @param GCPtrMem The address of the guest memory.
9520 */
9521IEM_STATIC VBOXSTRICTRC iemMemFetchDataR80(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9522{
9523 /* The lazy approach for now... */
9524 PCRTFLOAT80U pr80Src;
9525 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pr80Src, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9526 if (rc == VINF_SUCCESS)
9527 {
9528 *pr80Dst = *pr80Src;
9529 rc = iemMemCommitAndUnmap(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9530 }
9531 return rc;
9532}
9533
9534
9535#ifdef IEM_WITH_SETJMP
9536/**
9537 * Fetches a data tword, longjmp on error.
9538 *
9539 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9540 * @param pr80Dst Where to return the tword.
9541 * @param iSegReg The index of the segment register to use for
9542 * this access. The base and limits are checked.
9543 * @param GCPtrMem The address of the guest memory.
9544 */
9545DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataR80Jmp(PVMCPUCC pVCpu, PRTFLOAT80U pr80Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9546{
9547 /* The lazy approach for now... */
9548 PCRTFLOAT80U pr80Src = (PCRTFLOAT80U)iemMemMapJmp(pVCpu, sizeof(*pr80Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9549 *pr80Dst = *pr80Src;
9550 iemMemCommitAndUnmapJmp(pVCpu, (void *)pr80Src, IEM_ACCESS_DATA_R);
9551}
9552#endif
9553
9554
9555/**
9556 * Fetches a data dqword (double qword), generally SSE related.
9557 *
9558 * @returns Strict VBox status code.
9559 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9560 * @param pu128Dst Where to return the dqword.
9561 * @param iSegReg The index of the segment register to use for
9562 * this access. The base and limits are checked.
9563 * @param GCPtrMem The address of the guest memory.
9564 */
9565IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9566{
9567 /* The lazy approach for now... */
9568 PCRTUINT128U pu128Src;
9569 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9570 if (rc == VINF_SUCCESS)
9571 {
9572 pu128Dst->au64[0] = pu128Src->au64[0];
9573 pu128Dst->au64[1] = pu128Src->au64[1];
9574 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9575 }
9576 return rc;
9577}
9578
9579
9580#ifdef IEM_WITH_SETJMP
9581/**
9582 * Fetches a data dqword (double qword), generally SSE related, longjmp on error.
9583 *
9584 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9585 * @param pu128Dst Where to return the dqword.
9586 * @param iSegReg The index of the segment register to use for
9587 * this access. The base and limits are checked.
9588 * @param GCPtrMem The address of the guest memory.
9589 */
9590IEM_STATIC void iemMemFetchDataU128Jmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9591{
9592 /* The lazy approach for now... */
9593 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9594 pu128Dst->au64[0] = pu128Src->au64[0];
9595 pu128Dst->au64[1] = pu128Src->au64[1];
9596 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9597}
9598#endif
9599
9600
9601/**
9602 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9603 * related.
9604 *
9605 * Raises \#GP(0) if not aligned.
9606 *
9607 * @returns Strict VBox status code.
9608 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9609 * @param pu128Dst Where to return the dqword.
9610 * @param iSegReg The index of the segment register to use for
9611 * this access. The base and limits are checked.
9612 * @param GCPtrMem The address of the guest memory.
9613 */
9614IEM_STATIC VBOXSTRICTRC iemMemFetchDataU128AlignedSse(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9615{
9616 /* The lazy approach for now... */
9617 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9618 if ( (GCPtrMem & 15)
9619 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9620 return iemRaiseGeneralProtectionFault0(pVCpu);
9621
9622 PCRTUINT128U pu128Src;
9623 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Src, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9624 if (rc == VINF_SUCCESS)
9625 {
9626 pu128Dst->au64[0] = pu128Src->au64[0];
9627 pu128Dst->au64[1] = pu128Src->au64[1];
9628 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9629 }
9630 return rc;
9631}
9632
9633
9634#ifdef IEM_WITH_SETJMP
9635/**
9636 * Fetches a data dqword (double qword) at an aligned address, generally SSE
9637 * related, longjmp on error.
9638 *
9639 * Raises \#GP(0) if not aligned.
9640 *
9641 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9642 * @param pu128Dst Where to return the dqword.
9643 * @param iSegReg The index of the segment register to use for
9644 * this access. The base and limits are checked.
9645 * @param GCPtrMem The address of the guest memory.
9646 */
9647DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU128AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT128U pu128Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9648{
9649 /* The lazy approach for now... */
9650 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on SSE stuff. */
9651 if ( (GCPtrMem & 15) == 0
9652 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
9653 {
9654 PCRTUINT128U pu128Src = (PCRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9655 pu128Dst->au64[0] = pu128Src->au64[0];
9656 pu128Dst->au64[1] = pu128Src->au64[1];
9657 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu128Src, IEM_ACCESS_DATA_R);
9658 return;
9659 }
9660
9661 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9662 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9663}
9664#endif
9665
9666
9667/**
9668 * Fetches a data oword (octo word), generally AVX related.
9669 *
9670 * @returns Strict VBox status code.
9671 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9672 * @param pu256Dst Where to return the oword.
9673 * @param iSegReg The index of the segment register to use for
9674 * this access. The base and limits are checked.
9675 * @param GCPtrMem The address of the guest memory.
9676 */
9677IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9678{
9679 /* The lazy approach for now... */
9680 PCRTUINT256U pu256Src;
9681 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9682 if (rc == VINF_SUCCESS)
9683 {
9684 pu256Dst->au64[0] = pu256Src->au64[0];
9685 pu256Dst->au64[1] = pu256Src->au64[1];
9686 pu256Dst->au64[2] = pu256Src->au64[2];
9687 pu256Dst->au64[3] = pu256Src->au64[3];
9688 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9689 }
9690 return rc;
9691}
9692
9693
9694#ifdef IEM_WITH_SETJMP
9695/**
9696 * Fetches a data oword (octo word), generally AVX related, longjmp on error.
9697 *
9698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9699 * @param pu256Dst Where to return the oword.
9700 * @param iSegReg The index of the segment register to use for
9701 * this access. The base and limits are checked.
9702 * @param GCPtrMem The address of the guest memory.
9703 */
9704IEM_STATIC void iemMemFetchDataU256Jmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9705{
9706 /* The lazy approach for now... */
9707 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9708 pu256Dst->au64[0] = pu256Src->au64[0];
9709 pu256Dst->au64[1] = pu256Src->au64[1];
9710 pu256Dst->au64[2] = pu256Src->au64[2];
9711 pu256Dst->au64[3] = pu256Src->au64[3];
9712 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9713}
9714#endif
9715
9716
9717/**
9718 * Fetches a data oword (octo word) at an aligned address, generally AVX
9719 * related.
9720 *
9721 * Raises \#GP(0) if not aligned.
9722 *
9723 * @returns Strict VBox status code.
9724 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9725 * @param pu256Dst Where to return the oword.
9726 * @param iSegReg The index of the segment register to use for
9727 * this access. The base and limits are checked.
9728 * @param GCPtrMem The address of the guest memory.
9729 */
9730IEM_STATIC VBOXSTRICTRC iemMemFetchDataU256AlignedSse(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9731{
9732 /* The lazy approach for now... */
9733 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9734 if (GCPtrMem & 31)
9735 return iemRaiseGeneralProtectionFault0(pVCpu);
9736
9737 PCRTUINT256U pu256Src;
9738 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Src, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9739 if (rc == VINF_SUCCESS)
9740 {
9741 pu256Dst->au64[0] = pu256Src->au64[0];
9742 pu256Dst->au64[1] = pu256Src->au64[1];
9743 pu256Dst->au64[2] = pu256Src->au64[2];
9744 pu256Dst->au64[3] = pu256Src->au64[3];
9745 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9746 }
9747 return rc;
9748}
9749
9750
9751#ifdef IEM_WITH_SETJMP
9752/**
9753 * Fetches a data oword (octo word) at an aligned address, generally AVX
9754 * related, longjmp on error.
9755 *
9756 * Raises \#GP(0) if not aligned.
9757 *
9758 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9759 * @param pu256Dst Where to return the oword.
9760 * @param iSegReg The index of the segment register to use for
9761 * this access. The base and limits are checked.
9762 * @param GCPtrMem The address of the guest memory.
9763 */
9764DECL_NO_INLINE(IEM_STATIC, void) iemMemFetchDataU256AlignedSseJmp(PVMCPUCC pVCpu, PRTUINT256U pu256Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
9765{
9766 /* The lazy approach for now... */
9767 /** @todo testcase: Ordering of \#SS(0) vs \#GP() vs \#PF on AVX stuff. */
9768 if ((GCPtrMem & 31) == 0)
9769 {
9770 PCRTUINT256U pu256Src = (PCRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Src), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R);
9771 pu256Dst->au64[0] = pu256Src->au64[0];
9772 pu256Dst->au64[1] = pu256Src->au64[1];
9773 pu256Dst->au64[2] = pu256Src->au64[2];
9774 pu256Dst->au64[3] = pu256Src->au64[3];
9775 iemMemCommitAndUnmapJmp(pVCpu, (void *)pu256Src, IEM_ACCESS_DATA_R);
9776 return;
9777 }
9778
9779 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
9780 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
9781}
9782#endif
9783
9784
9785
9786/**
9787 * Fetches a descriptor register (lgdt, lidt).
9788 *
9789 * @returns Strict VBox status code.
9790 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9791 * @param pcbLimit Where to return the limit.
9792 * @param pGCPtrBase Where to return the base.
9793 * @param iSegReg The index of the segment register to use for
9794 * this access. The base and limits are checked.
9795 * @param GCPtrMem The address of the guest memory.
9796 * @param enmOpSize The effective operand size.
9797 */
9798IEM_STATIC VBOXSTRICTRC iemMemFetchDataXdtr(PVMCPUCC pVCpu, uint16_t *pcbLimit, PRTGCPTR pGCPtrBase, uint8_t iSegReg,
9799 RTGCPTR GCPtrMem, IEMMODE enmOpSize)
9800{
9801 /*
9802 * Just like SIDT and SGDT, the LIDT and LGDT instructions are a
9803 * little special:
9804 * - The two reads are done separately.
9805 * - Operand size override works in 16-bit and 32-bit code, but not in 64-bit code.
9806 * - We suspect the 386 to actually commit the limit before the base in
9807 * some cases (search for 386 in bs3CpuBasic2_lidt_lgdt_One). We
9808 * don't try emulate this eccentric behavior, because it's not well
9809 * enough understood and rather hard to trigger.
9810 * - The 486 seems to do a dword limit read when the operand size is 32-bit.
9811 */
9812 VBOXSTRICTRC rcStrict;
9813 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT)
9814 {
9815 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9816 if (rcStrict == VINF_SUCCESS)
9817 rcStrict = iemMemFetchDataU64(pVCpu, pGCPtrBase, iSegReg, GCPtrMem + 2);
9818 }
9819 else
9820 {
9821 uint32_t uTmp = 0; /* (Silences a Visual C++ potentially-uninitialized warning.) */
9822 if (enmOpSize == IEMMODE_32BIT)
9823 {
9824 if (IEM_GET_TARGET_CPU(pVCpu) != IEMTARGETCPU_486)
9825 {
9826 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9827 if (rcStrict == VINF_SUCCESS)
9828 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9829 }
9830 else
9831 {
9832 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem);
9833 if (rcStrict == VINF_SUCCESS)
9834 {
9835 *pcbLimit = (uint16_t)uTmp;
9836 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9837 }
9838 }
9839 if (rcStrict == VINF_SUCCESS)
9840 *pGCPtrBase = uTmp;
9841 }
9842 else
9843 {
9844 rcStrict = iemMemFetchDataU16(pVCpu, pcbLimit, iSegReg, GCPtrMem);
9845 if (rcStrict == VINF_SUCCESS)
9846 {
9847 rcStrict = iemMemFetchDataU32(pVCpu, &uTmp, iSegReg, GCPtrMem + 2);
9848 if (rcStrict == VINF_SUCCESS)
9849 *pGCPtrBase = uTmp & UINT32_C(0x00ffffff);
9850 }
9851 }
9852 }
9853 return rcStrict;
9854}
9855
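/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * source): how an LGDT/LIDT style caller might consume iemMemFetchDataXdtr()
 * above.  The helper name and the iEffSeg/GCPtrEffSrc/enmEffOpSize parameters
 * are hypothetical; a real implementation would go on to commit the limit and
 * base to the guest descriptor-table register once the fetch succeeds.
 */
#if 0
static VBOXSTRICTRC iemSketchFetchGdtrImage(PVMCPUCC pVCpu, uint8_t iEffSeg, RTGCPTR GCPtrEffSrc, IEMMODE enmEffOpSize)
{
    uint16_t     cbLimit   = 0;
    RTGCPTR      GCPtrBase = 0;
    VBOXSTRICTRC rcStrict  = iemMemFetchDataXdtr(pVCpu, &cbLimit, &GCPtrBase, iEffSeg, GCPtrEffSrc, enmOpSize: enmEffOpSize);
    if (rcStrict == VINF_SUCCESS)
    {
        /* Both the 16-bit limit and the (possibly 24-bit masked) base are now
           valid; committing them to the guest state is omitted in this sketch. */
    }
    return rcStrict;
}
#endif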
9856
9857
9858/**
9859 * Stores a data byte.
9860 *
9861 * @returns Strict VBox status code.
9862 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9863 * @param iSegReg The index of the segment register to use for
9864 * this access. The base and limits are checked.
9865 * @param GCPtrMem The address of the guest memory.
9866 * @param u8Value The value to store.
9867 */
9868IEM_STATIC VBOXSTRICTRC iemMemStoreDataU8(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9869{
9870 /* The lazy approach for now... */
9871 uint8_t *pu8Dst;
9872 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu8Dst, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9873 if (rc == VINF_SUCCESS)
9874 {
9875 *pu8Dst = u8Value;
9876 rc = iemMemCommitAndUnmap(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9877 }
9878 return rc;
9879}
9880
9881
9882#ifdef IEM_WITH_SETJMP
9883/**
9884 * Stores a data byte, longjmp on error.
9885 *
9886 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9887 * @param iSegReg The index of the segment register to use for
9888 * this access. The base and limits are checked.
9889 * @param GCPtrMem The address of the guest memory.
9890 * @param u8Value The value to store.
9891 */
9892IEM_STATIC void iemMemStoreDataU8Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint8_t u8Value)
9893{
9894 /* The lazy approach for now... */
9895 uint8_t *pu8Dst = (uint8_t *)iemMemMapJmp(pVCpu, sizeof(*pu8Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9896 *pu8Dst = u8Value;
9897 iemMemCommitAndUnmapJmp(pVCpu, pu8Dst, IEM_ACCESS_DATA_W);
9898}
9899#endif
9900
9901
9902/**
9903 * Stores a data word.
9904 *
9905 * @returns Strict VBox status code.
9906 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9907 * @param iSegReg The index of the segment register to use for
9908 * this access. The base and limits are checked.
9909 * @param GCPtrMem The address of the guest memory.
9910 * @param u16Value The value to store.
9911 */
9912IEM_STATIC VBOXSTRICTRC iemMemStoreDataU16(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9913{
9914 /* The lazy approach for now... */
9915 uint16_t *pu16Dst;
9916 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9917 if (rc == VINF_SUCCESS)
9918 {
9919 *pu16Dst = u16Value;
9920 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9921 }
9922 return rc;
9923}
9924
9925
9926#ifdef IEM_WITH_SETJMP
9927/**
9928 * Stores a data word, longjmp on error.
9929 *
9930 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9931 * @param iSegReg The index of the segment register to use for
9932 * this access. The base and limits are checked.
9933 * @param GCPtrMem The address of the guest memory.
9934 * @param u16Value The value to store.
9935 */
9936IEM_STATIC void iemMemStoreDataU16Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint16_t u16Value)
9937{
9938 /* The lazy approach for now... */
9939 uint16_t *pu16Dst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(*pu16Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9940 *pu16Dst = u16Value;
9941 iemMemCommitAndUnmapJmp(pVCpu, pu16Dst, IEM_ACCESS_DATA_W);
9942}
9943#endif
9944
9945
9946/**
9947 * Stores a data dword.
9948 *
9949 * @returns Strict VBox status code.
9950 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9951 * @param iSegReg The index of the segment register to use for
9952 * this access. The base and limits are checked.
9953 * @param GCPtrMem The address of the guest memory.
9954 * @param u32Value The value to store.
9955 */
9956IEM_STATIC VBOXSTRICTRC iemMemStoreDataU32(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9957{
9958 /* The lazy approach for now... */
9959 uint32_t *pu32Dst;
9960 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9961 if (rc == VINF_SUCCESS)
9962 {
9963 *pu32Dst = u32Value;
9964 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9965 }
9966 return rc;
9967}
9968
9969
9970#ifdef IEM_WITH_SETJMP
9971/**
9972 * Stores a data dword, longjmp on error.
9973 *
9975 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9976 * @param iSegReg The index of the segment register to use for
9977 * this access. The base and limits are checked.
9978 * @param GCPtrMem The address of the guest memory.
9979 * @param u32Value The value to store.
9980 */
9981IEM_STATIC void iemMemStoreDataU32Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint32_t u32Value)
9982{
9983 /* The lazy approach for now... */
9984 uint32_t *pu32Dst = (uint32_t *)iemMemMapJmp(pVCpu, sizeof(*pu32Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
9985 *pu32Dst = u32Value;
9986 iemMemCommitAndUnmapJmp(pVCpu, pu32Dst, IEM_ACCESS_DATA_W);
9987}
9988#endif
9989
9990
9991/**
9992 * Stores a data qword.
9993 *
9994 * @returns Strict VBox status code.
9995 * @param pVCpu The cross context virtual CPU structure of the calling thread.
9996 * @param iSegReg The index of the segment register to use for
9997 * this access. The base and limits are checked.
9998 * @param GCPtrMem The address of the guest memory.
9999 * @param u64Value The value to store.
10000 */
10001IEM_STATIC VBOXSTRICTRC iemMemStoreDataU64(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10002{
10003 /* The lazy approach for now... */
10004 uint64_t *pu64Dst;
10005 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10006 if (rc == VINF_SUCCESS)
10007 {
10008 *pu64Dst = u64Value;
10009 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10010 }
10011 return rc;
10012}
10013
10014
10015#ifdef IEM_WITH_SETJMP
10016/**
10017 * Stores a data qword, longjmp on error.
10018 *
10019 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10020 * @param iSegReg The index of the segment register to use for
10021 * this access. The base and limits are checked.
10022 * @param GCPtrMem The address of the guest memory.
10023 * @param u64Value The value to store.
10024 */
10025IEM_STATIC void iemMemStoreDataU64Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, uint64_t u64Value)
10026{
10027 /* The lazy approach for now... */
10028 uint64_t *pu64Dst = (uint64_t *)iemMemMapJmp(pVCpu, sizeof(*pu64Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10029 *pu64Dst = u64Value;
10030 iemMemCommitAndUnmapJmp(pVCpu, pu64Dst, IEM_ACCESS_DATA_W);
10031}
10032#endif
10033
10034
10035/**
10036 * Stores a data dqword.
10037 *
10038 * @returns Strict VBox status code.
10039 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10040 * @param iSegReg The index of the segment register to use for
10041 * this access. The base and limits are checked.
10042 * @param GCPtrMem The address of the guest memory.
10043 * @param u128Value The value to store.
10044 */
10045IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10046{
10047 /* The lazy approach for now... */
10048 PRTUINT128U pu128Dst;
10049 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10050 if (rc == VINF_SUCCESS)
10051 {
10052 pu128Dst->au64[0] = u128Value.au64[0];
10053 pu128Dst->au64[1] = u128Value.au64[1];
10054 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10055 }
10056 return rc;
10057}
10058
10059
10060#ifdef IEM_WITH_SETJMP
10061/**
10062 * Stores a data dqword, longjmp on error.
10063 *
10064 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10065 * @param iSegReg The index of the segment register to use for
10066 * this access. The base and limits are checked.
10067 * @param GCPtrMem The address of the guest memory.
10068 * @param u128Value The value to store.
10069 */
10070IEM_STATIC void iemMemStoreDataU128Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10071{
10072 /* The lazy approach for now... */
10073 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10074 pu128Dst->au64[0] = u128Value.au64[0];
10075 pu128Dst->au64[1] = u128Value.au64[1];
10076 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10077}
10078#endif
10079
10080
10081/**
10082 * Stores a data dqword, SSE aligned.
10083 *
10084 * @returns Strict VBox status code.
10085 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10086 * @param iSegReg The index of the segment register to use for
10087 * this access. The base and limits are checked.
10088 * @param GCPtrMem The address of the guest memory.
10089 * @param u128Value The value to store.
10090 */
10091IEM_STATIC VBOXSTRICTRC iemMemStoreDataU128AlignedSse(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10092{
10093 /* The lazy approach for now... */
10094 if ( (GCPtrMem & 15)
10095 && !(pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10096 return iemRaiseGeneralProtectionFault0(pVCpu);
10097
10098 PRTUINT128U pu128Dst;
10099 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu128Dst, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10100 if (rc == VINF_SUCCESS)
10101 {
10102 pu128Dst->au64[0] = u128Value.au64[0];
10103 pu128Dst->au64[1] = u128Value.au64[1];
10104 rc = iemMemCommitAndUnmap(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10105 }
10106 return rc;
10107}
10108
10109
10110#ifdef IEM_WITH_SETJMP
10111/**
10112 * Stores a data dqword, SSE aligned, longjmp on error.
10113 *
10115 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10116 * @param iSegReg The index of the segment register to use for
10117 * this access. The base and limits are checked.
10118 * @param GCPtrMem The address of the guest memory.
10119 * @param u128Value The value to store.
10120 */
10121DECL_NO_INLINE(IEM_STATIC, void)
10122iemMemStoreDataU128AlignedSseJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, RTUINT128U u128Value)
10123{
10124 /* The lazy approach for now... */
10125 if ( (GCPtrMem & 15) == 0
10126 || (pVCpu->cpum.GstCtx.XState.x87.MXCSR & X86_MXCSR_MM)) /** @todo should probably check this *after* applying seg.u64Base... Check real HW. */
10127 {
10128 PRTUINT128U pu128Dst = (PRTUINT128U)iemMemMapJmp(pVCpu, sizeof(*pu128Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10129 pu128Dst->au64[0] = u128Value.au64[0];
10130 pu128Dst->au64[1] = u128Value.au64[1];
10131 iemMemCommitAndUnmapJmp(pVCpu, pu128Dst, IEM_ACCESS_DATA_W);
10132 return;
10133 }
10134
10135 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10136 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10137}
10138#endif
10139
10140
10141/**
10142 * Stores a data oword (octo word).
10143 *
10144 * @returns Strict VBox status code.
10145 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10146 * @param iSegReg The index of the segment register to use for
10147 * this access. The base and limits are checked.
10148 * @param GCPtrMem The address of the guest memory.
10149 * @param pu256Value Pointer to the value to store.
10150 */
10151IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10152{
10153 /* The lazy approach for now... */
10154 PRTUINT256U pu256Dst;
10155 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10156 if (rc == VINF_SUCCESS)
10157 {
10158 pu256Dst->au64[0] = pu256Value->au64[0];
10159 pu256Dst->au64[1] = pu256Value->au64[1];
10160 pu256Dst->au64[2] = pu256Value->au64[2];
10161 pu256Dst->au64[3] = pu256Value->au64[3];
10162 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10163 }
10164 return rc;
10165}
10166
10167
10168#ifdef IEM_WITH_SETJMP
10169/**
10170 * Stores a data oword (octo word), longjmp on error.
10171 *
10172 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10173 * @param iSegReg The index of the segment register to use for
10174 * this access. The base and limits are checked.
10175 * @param GCPtrMem The address of the guest memory.
10176 * @param pu256Value Pointer to the value to store.
10177 */
10178IEM_STATIC void iemMemStoreDataU256Jmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10179{
10180 /* The lazy approach for now... */
10181 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10182 pu256Dst->au64[0] = pu256Value->au64[0];
10183 pu256Dst->au64[1] = pu256Value->au64[1];
10184 pu256Dst->au64[2] = pu256Value->au64[2];
10185 pu256Dst->au64[3] = pu256Value->au64[3];
10186 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10187}
10188#endif
10189
10190
10191/**
10192 * Stores a data oword (octo word), AVX aligned.
10193 *
10194 * @returns Strict VBox status code.
10195 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10196 * @param iSegReg The index of the segment register to use for
10197 * this access. The base and limits are checked.
10198 * @param GCPtrMem The address of the guest memory.
10199 * @param pu256Value Pointer to the value to store.
10200 */
10201IEM_STATIC VBOXSTRICTRC iemMemStoreDataU256AlignedAvx(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10202{
10203 /* The lazy approach for now... */
10204 if (GCPtrMem & 31)
10205 return iemRaiseGeneralProtectionFault0(pVCpu);
10206
10207 PRTUINT256U pu256Dst;
10208 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu256Dst, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10209 if (rc == VINF_SUCCESS)
10210 {
10211 pu256Dst->au64[0] = pu256Value->au64[0];
10212 pu256Dst->au64[1] = pu256Value->au64[1];
10213 pu256Dst->au64[2] = pu256Value->au64[2];
10214 pu256Dst->au64[3] = pu256Value->au64[3];
10215 rc = iemMemCommitAndUnmap(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10216 }
10217 return rc;
10218}
10219
10220
10221#ifdef IEM_WITH_SETJMP
10222/**
10223 * Stores a data oword (octo word), AVX aligned, longjmp on error.
10224 *
10226 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10227 * @param iSegReg The index of the segment register to use for
10228 * this access. The base and limits are checked.
10229 * @param GCPtrMem The address of the guest memory.
10230 * @param pu256Value Pointer to the value to store.
10231 */
10232DECL_NO_INLINE(IEM_STATIC, void)
10233iemMemStoreDataU256AlignedAvxJmp(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem, PCRTUINT256U pu256Value)
10234{
10235 /* The lazy approach for now... */
10236 if ((GCPtrMem & 31) == 0)
10237 {
10238 PRTUINT256U pu256Dst = (PRTUINT256U)iemMemMapJmp(pVCpu, sizeof(*pu256Dst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W);
10239 pu256Dst->au64[0] = pu256Value->au64[0];
10240 pu256Dst->au64[1] = pu256Value->au64[1];
10241 pu256Dst->au64[2] = pu256Value->au64[2];
10242 pu256Dst->au64[3] = pu256Value->au64[3];
10243 iemMemCommitAndUnmapJmp(pVCpu, pu256Dst, IEM_ACCESS_DATA_W);
10244 return;
10245 }
10246
10247 VBOXSTRICTRC rcStrict = iemRaiseGeneralProtectionFault0(pVCpu);
10248 longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VBOXSTRICTRC_VAL(rcStrict));
10249}
10250#endif
10251
10252
10253/**
10254 * Stores a descriptor register (sgdt, sidt).
10255 *
10256 * @returns Strict VBox status code.
10257 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10258 * @param cbLimit The limit.
10259 * @param GCPtrBase The base address.
10260 * @param iSegReg The index of the segment register to use for
10261 * this access. The base and limits are checked.
10262 * @param GCPtrMem The address of the guest memory.
10263 */
10264IEM_STATIC VBOXSTRICTRC
10265iemMemStoreDataXdtr(PVMCPUCC pVCpu, uint16_t cbLimit, RTGCPTR GCPtrBase, uint8_t iSegReg, RTGCPTR GCPtrMem)
10266{
10267 /*
10268 * The SIDT and SGDT instructions actually store the data using two
10269 * independent writes. The instructions do not respond to operand size prefixes.
10270 */
10271 VBOXSTRICTRC rcStrict = iemMemStoreDataU16(pVCpu, iSegReg, GCPtrMem, cbLimit);
10272 if (rcStrict == VINF_SUCCESS)
10273 {
10274 if (pVCpu->iem.s.enmCpuMode == IEMMODE_16BIT)
10275 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2,
10276 IEM_GET_TARGET_CPU(pVCpu) <= IEMTARGETCPU_286
10277 ? (uint32_t)GCPtrBase | UINT32_C(0xff000000) : (uint32_t)GCPtrBase);
10278 else if (pVCpu->iem.s.enmCpuMode == IEMMODE_32BIT)
10279 rcStrict = iemMemStoreDataU32(pVCpu, iSegReg, GCPtrMem + 2, (uint32_t)GCPtrBase);
10280 else
10281 rcStrict = iemMemStoreDataU64(pVCpu, iSegReg, GCPtrMem + 2, GCPtrBase);
10282 }
10283 return rcStrict;
10284}
10285
10286
10287/**
10288 * Pushes a word onto the stack.
10289 *
10290 * @returns Strict VBox status code.
10291 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10292 * @param u16Value The value to push.
10293 */
10294IEM_STATIC VBOXSTRICTRC iemMemStackPushU16(PVMCPUCC pVCpu, uint16_t u16Value)
10295{
10296 /* Decrement the stack pointer. */
10297 uint64_t uNewRsp;
10298 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 2, &uNewRsp);
10299
10300 /* Write the word the lazy way. */
10301 uint16_t *pu16Dst;
10302 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10303 if (rc == VINF_SUCCESS)
10304 {
10305 *pu16Dst = u16Value;
10306 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10307 }
10308
10309 /* Commit the new RSP value unless an access handler made trouble. */
10310 if (rc == VINF_SUCCESS)
10311 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10312
10313 return rc;
10314}
10315
10316
10317/**
10318 * Pushes a dword onto the stack.
10319 *
10320 * @returns Strict VBox status code.
10321 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10322 * @param u32Value The value to push.
10323 */
10324IEM_STATIC VBOXSTRICTRC iemMemStackPushU32(PVMCPUCC pVCpu, uint32_t u32Value)
10325{
10326 /* Decrement the stack pointer. */
10327 uint64_t uNewRsp;
10328 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10329
10330 /* Write the dword the lazy way. */
10331 uint32_t *pu32Dst;
10332 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10333 if (rc == VINF_SUCCESS)
10334 {
10335 *pu32Dst = u32Value;
10336 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10337 }
10338
10339 /* Commit the new RSP value unless an access handler made trouble. */
10340 if (rc == VINF_SUCCESS)
10341 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10342
10343 return rc;
10344}
10345
10346
10347/**
10348 * Pushes a dword segment register value onto the stack.
10349 *
10350 * @returns Strict VBox status code.
10351 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10352 * @param u32Value The value to push.
10353 */
10354IEM_STATIC VBOXSTRICTRC iemMemStackPushU32SReg(PVMCPUCC pVCpu, uint32_t u32Value)
10355{
10356 /* Decrement the stack pointer. */
10357 uint64_t uNewRsp;
10358 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 4, &uNewRsp);
10359
10360 /* The Intel docs talk about zero extending the selector register
10361 value. The actual Intel CPU tested here may well zero extend the value,
10362 but it still only writes the lower word... */
10363 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what
10364 * happens when crossing a page boundary: is the high word checked
10365 * for write accessibility or not? Probably it is. What about segment limits?
10366 * It appears this behavior is also shared with trap error codes.
10367 *
10368 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
10369 * ancient hardware when it actually did change. */
10370 uint16_t *pu16Dst;
10371 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW);
10372 if (rc == VINF_SUCCESS)
10373 {
10374 *pu16Dst = (uint16_t)u32Value;
10375 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW);
10376 }
10377
10378 /* Commit the new RSP value unless an access handler made trouble. */
10379 if (rc == VINF_SUCCESS)
10380 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10381
10382 return rc;
10383}
10384
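/*
 * Illustrative sketch (added for exposition, not part of the original source):
 * a stand-alone demonstration of the behaviour described in the comment in
 * iemMemStackPushU32SReg() above: a 32-bit operand-size "push sreg" reserves a
 * dword on the stack, but only the low word of it is written, so the upper two
 * bytes keep whatever was there.  The buffer stands in for the four stack
 * bytes; the values are made up.
 */
#if 0
static void iemSketchPushSRegLowWordOnly(void)
{
    uint8_t        abStackSlot[4] = { 0x11, 0x22, 0x33, 0x44 }; /* stale stack contents */
    uint16_t const uSel           = 0xcafe;                     /* selector value being pushed */
    abStackSlot[0] = (uint8_t)uSel;
    abStackSlot[1] = (uint8_t)(uSel >> 8);
    /* Bytes 2 and 3 are left untouched, mirroring the *pu16Dst = (uint16_t)u32Value store above. */
    Assert(abStackSlot[2] == 0x33 && abStackSlot[3] == 0x44);
}
#endif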
10385
10386/**
10387 * Pushes a qword onto the stack.
10388 *
10389 * @returns Strict VBox status code.
10390 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10391 * @param u64Value The value to push.
10392 */
10393IEM_STATIC VBOXSTRICTRC iemMemStackPushU64(PVMCPUCC pVCpu, uint64_t u64Value)
10394{
10395 /* Decrement the stack pointer. */
10396 uint64_t uNewRsp;
10397 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, 8, &uNewRsp);
10398
10399 /* Write the qword the lazy way. */
10400 uint64_t *pu64Dst;
10401 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10402 if (rc == VINF_SUCCESS)
10403 {
10404 *pu64Dst = u64Value;
10405 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10406 }
10407
10408 /* Commit the new RSP value unless an access handler made trouble. */
10409 if (rc == VINF_SUCCESS)
10410 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10411
10412 return rc;
10413}
10414
10415
10416/**
10417 * Pops a word from the stack.
10418 *
10419 * @returns Strict VBox status code.
10420 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10421 * @param pu16Value Where to store the popped value.
10422 */
10423IEM_STATIC VBOXSTRICTRC iemMemStackPopU16(PVMCPUCC pVCpu, uint16_t *pu16Value)
10424{
10425 /* Increment the stack pointer. */
10426 uint64_t uNewRsp;
10427 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 2, &uNewRsp);
10428
10429 /* Fetch the word the lazy way. */
10430 uint16_t const *pu16Src;
10431 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10432 if (rc == VINF_SUCCESS)
10433 {
10434 *pu16Value = *pu16Src;
10435 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10436
10437 /* Commit the new RSP value. */
10438 if (rc == VINF_SUCCESS)
10439 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10440 }
10441
10442 return rc;
10443}
10444
10445
10446/**
10447 * Pops a dword from the stack.
10448 *
10449 * @returns Strict VBox status code.
10450 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10451 * @param pu32Value Where to store the popped value.
10452 */
10453IEM_STATIC VBOXSTRICTRC iemMemStackPopU32(PVMCPUCC pVCpu, uint32_t *pu32Value)
10454{
10455 /* Increment the stack pointer. */
10456 uint64_t uNewRsp;
10457 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 4, &uNewRsp);
10458
10459 /* Fetch the dword the lazy way. */
10460 uint32_t const *pu32Src;
10461 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10462 if (rc == VINF_SUCCESS)
10463 {
10464 *pu32Value = *pu32Src;
10465 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10466
10467 /* Commit the new RSP value. */
10468 if (rc == VINF_SUCCESS)
10469 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10470 }
10471
10472 return rc;
10473}
10474
10475
10476/**
10477 * Pops a qword from the stack.
10478 *
10479 * @returns Strict VBox status code.
10480 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10481 * @param pu64Value Where to store the popped value.
10482 */
10483IEM_STATIC VBOXSTRICTRC iemMemStackPopU64(PVMCPUCC pVCpu, uint64_t *pu64Value)
10484{
10485 /* Increment the stack pointer. */
10486 uint64_t uNewRsp;
10487 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, 8, &uNewRsp);
10488
10489 /* Fetch the qword the lazy way. */
10490 uint64_t const *pu64Src;
10491 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10492 if (rc == VINF_SUCCESS)
10493 {
10494 *pu64Value = *pu64Src;
10495 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10496
10497 /* Commit the new RSP value. */
10498 if (rc == VINF_SUCCESS)
10499 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10500 }
10501
10502 return rc;
10503}
10504
10505
10506/**
10507 * Pushes a word onto the stack, using a temporary stack pointer.
10508 *
10509 * @returns Strict VBox status code.
10510 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10511 * @param u16Value The value to push.
10512 * @param pTmpRsp Pointer to the temporary stack pointer.
10513 */
10514IEM_STATIC VBOXSTRICTRC iemMemStackPushU16Ex(PVMCPUCC pVCpu, uint16_t u16Value, PRTUINT64U pTmpRsp)
10515{
10516 /* Decrement the stack pointer. */
10517 RTUINT64U NewRsp = *pTmpRsp;
10518 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 2);
10519
10520 /* Write the word the lazy way. */
10521 uint16_t *pu16Dst;
10522 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(*pu16Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10523 if (rc == VINF_SUCCESS)
10524 {
10525 *pu16Dst = u16Value;
10526 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_W);
10527 }
10528
10529 /* Commit the new RSP value unless an access handler made trouble. */
10530 if (rc == VINF_SUCCESS)
10531 *pTmpRsp = NewRsp;
10532
10533 return rc;
10534}
10535
10536
10537/**
10538 * Pushes a dword onto the stack, using a temporary stack pointer.
10539 *
10540 * @returns Strict VBox status code.
10541 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10542 * @param u32Value The value to push.
10543 * @param pTmpRsp Pointer to the temporary stack pointer.
10544 */
10545IEM_STATIC VBOXSTRICTRC iemMemStackPushU32Ex(PVMCPUCC pVCpu, uint32_t u32Value, PRTUINT64U pTmpRsp)
10546{
10547 /* Decrement the stack pointer. */
10548 RTUINT64U NewRsp = *pTmpRsp;
10549 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 4);
10550
10551 /* Write the dword the lazy way. */
10552 uint32_t *pu32Dst;
10553 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10554 if (rc == VINF_SUCCESS)
10555 {
10556 *pu32Dst = u32Value;
10557 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W);
10558 }
10559
10560 /* Commit the new RSP value unless an access handler made trouble. */
10561 if (rc == VINF_SUCCESS)
10562 *pTmpRsp = NewRsp;
10563
10564 return rc;
10565}
10566
10567
10568/**
10569 * Pushes a qword onto the stack, using a temporary stack pointer.
10570 *
10571 * @returns Strict VBox status code.
10572 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10573 * @param u64Value The value to push.
10574 * @param pTmpRsp Pointer to the temporary stack pointer.
10575 */
10576IEM_STATIC VBOXSTRICTRC iemMemStackPushU64Ex(PVMCPUCC pVCpu, uint64_t u64Value, PRTUINT64U pTmpRsp)
10577{
10578 /* Decrement the stack pointer. */
10579 RTUINT64U NewRsp = *pTmpRsp;
10580 RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, 8);
10581
10582 /* Write the qword the lazy way. */
10583 uint64_t *pu64Dst;
10584 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Dst, sizeof(*pu64Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10585 if (rc == VINF_SUCCESS)
10586 {
10587 *pu64Dst = u64Value;
10588 rc = iemMemCommitAndUnmap(pVCpu, pu64Dst, IEM_ACCESS_STACK_W);
10589 }
10590
10591 /* Commit the new RSP value unless an access handler made trouble. */
10592 if (rc == VINF_SUCCESS)
10593 *pTmpRsp = NewRsp;
10594
10595 return rc;
10596}
10597
10598
10599/**
10600 * Pops a word from the stack, using a temporary stack pointer.
10601 *
10602 * @returns Strict VBox status code.
10603 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10604 * @param pu16Value Where to store the popped value.
10605 * @param pTmpRsp Pointer to the temporary stack pointer.
10606 */
10607IEM_STATIC VBOXSTRICTRC iemMemStackPopU16Ex(PVMCPUCC pVCpu, uint16_t *pu16Value, PRTUINT64U pTmpRsp)
10608{
10609 /* Increment the stack pointer. */
10610 RTUINT64U NewRsp = *pTmpRsp;
10611 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 2);
10612
10613 /* Fetch the word the lazy way. */
10614 uint16_t const *pu16Src;
10615 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10616 if (rc == VINF_SUCCESS)
10617 {
10618 *pu16Value = *pu16Src;
10619 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_STACK_R);
10620
10621 /* Commit the new RSP value. */
10622 if (rc == VINF_SUCCESS)
10623 *pTmpRsp = NewRsp;
10624 }
10625
10626 return rc;
10627}
10628
10629
10630/**
10631 * Pops a dword from the stack, using a temporary stack pointer.
10632 *
10633 * @returns Strict VBox status code.
10634 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10635 * @param pu32Value Where to store the popped value.
10636 * @param pTmpRsp Pointer to the temporary stack pointer.
10637 */
10638IEM_STATIC VBOXSTRICTRC iemMemStackPopU32Ex(PVMCPUCC pVCpu, uint32_t *pu32Value, PRTUINT64U pTmpRsp)
10639{
10640 /* Increment the stack pointer. */
10641 RTUINT64U NewRsp = *pTmpRsp;
10642 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 4);
10643
10644 /* Fetch the dword the lazy way. */
10645 uint32_t const *pu32Src;
10646 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10647 if (rc == VINF_SUCCESS)
10648 {
10649 *pu32Value = *pu32Src;
10650 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_STACK_R);
10651
10652 /* Commit the new RSP value. */
10653 if (rc == VINF_SUCCESS)
10654 *pTmpRsp = NewRsp;
10655 }
10656
10657 return rc;
10658}
10659
10660
10661/**
10662 * Pops a qword from the stack, using a temporary stack pointer.
10663 *
10664 * @returns Strict VBox status code.
10665 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10666 * @param pu64Value Where to store the popped value.
10667 * @param pTmpRsp Pointer to the temporary stack pointer.
10668 */
10669IEM_STATIC VBOXSTRICTRC iemMemStackPopU64Ex(PVMCPUCC pVCpu, uint64_t *pu64Value, PRTUINT64U pTmpRsp)
10670{
10671 /* Increment the stack pointer. */
10672 RTUINT64U NewRsp = *pTmpRsp;
10673 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10674
10675 /* Fetch the qword the lazy way. */
10676 uint64_t const *pu64Src;
10677 VBOXSTRICTRC rcStrict = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10678 if (rcStrict == VINF_SUCCESS)
10679 {
10680 *pu64Value = *pu64Src;
10681 rcStrict = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_STACK_R);
10682
10683 /* Commit the new RSP value. */
10684 if (rcStrict == VINF_SUCCESS)
10685 *pTmpRsp = NewRsp;
10686 }
10687
10688 return rcStrict;
10689}
10690
10691
10692/**
10693 * Begin a special stack push (used by interrupts, exceptions and such).
10694 *
10695 * This will raise \#SS or \#PF if appropriate.
10696 *
10697 * @returns Strict VBox status code.
10698 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10699 * @param cbMem The number of bytes to push onto the stack.
10700 * @param ppvMem Where to return the pointer to the stack memory.
10701 * As with the other memory functions this could be
10702 * direct access or bounce buffered access, so
10703 * don't commit the register until the commit call
10704 * succeeds.
10705 * @param puNewRsp Where to return the new RSP value. This must be
10706 * passed unchanged to
10707 * iemMemStackPushCommitSpecial().
10708 */
10709IEM_STATIC VBOXSTRICTRC iemMemStackPushBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void **ppvMem, uint64_t *puNewRsp)
10710{
10711 Assert(cbMem < UINT8_MAX);
10712 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, (uint8_t)cbMem, puNewRsp);
10713 return iemMemMap(pVCpu, ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W);
10714}
10715
10716
10717/**
10718 * Commits a special stack push (started by iemMemStackPushBeginSpecial).
10719 *
10720 * This will update the rSP.
10721 *
10722 * @returns Strict VBox status code.
10723 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10724 * @param pvMem The pointer returned by
10725 * iemMemStackPushBeginSpecial().
10726 * @param uNewRsp The new RSP value returned by
10727 * iemMemStackPushBeginSpecial().
10728 */
10729IEM_STATIC VBOXSTRICTRC iemMemStackPushCommitSpecial(PVMCPUCC pVCpu, void *pvMem, uint64_t uNewRsp)
10730{
10731 VBOXSTRICTRC rcStrict = iemMemCommitAndUnmap(pVCpu, pvMem, IEM_ACCESS_STACK_W);
10732 if (rcStrict == VINF_SUCCESS)
10733 pVCpu->cpum.GstCtx.rsp = uNewRsp;
10734 return rcStrict;
10735}
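
/*
 * Illustrative usage sketch (hypothetical caller, frame layout made up for the
 * example): the begin/commit pair brackets the actual frame construction; the
 * frame is written through the mapping returned by the begin call and RSP is
 * only updated by the commit:
 *
 *      uint64_t     uNewRsp;
 *      uint32_t    *pu32Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPushBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
 *                                                          (void **)&pu32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      pu32Frame[2] = uFlagsToPush;    (made-up locals)
 *      pu32Frame[1] = uCsToPush;
 *      pu32Frame[0] = uIpToPush;
 *      rcStrict = iemMemStackPushCommitSpecial(pVCpu, pu32Frame, uNewRsp);
 */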
10736
10737
10738/**
10739 * Begin a special stack pop (used by iret, retf and such).
10740 *
10741 * This will raise \#SS or \#PF if appropriate.
10742 *
10743 * @returns Strict VBox status code.
10744 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10745 * @param cbMem The number of bytes to pop from the stack.
10746 * @param ppvMem Where to return the pointer to the stack memory.
10747 * @param puNewRsp Where to return the new RSP value. This must be
10748 * assigned to CPUMCTX::rsp manually some time
10749 * after iemMemStackPopDoneSpecial() has been
10750 * called.
10751 */
10752IEM_STATIC VBOXSTRICTRC iemMemStackPopBeginSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10753{
10754 Assert(cbMem < UINT8_MAX);
10755 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, (uint8_t)cbMem, puNewRsp);
10756 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10757}
10758
10759
10760/**
10761 * Continue a special stack pop (used by iret and retf).
10762 *
10763 * This will raise \#SS or \#PF if appropriate.
10764 *
10765 * @returns Strict VBox status code.
10766 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10767 * @param cbMem The number of bytes to pop from the stack.
10768 * @param ppvMem Where to return the pointer to the stack memory.
10769 * @param puNewRsp Where to return the new RSP value. This must be
10770 * assigned to CPUMCTX::rsp manually some time
10771 * after iemMemStackPopDoneSpecial() has been
10772 * called.
10773 */
10774IEM_STATIC VBOXSTRICTRC iemMemStackPopContinueSpecial(PVMCPUCC pVCpu, size_t cbMem, void const **ppvMem, uint64_t *puNewRsp)
10775{
10776 Assert(cbMem < UINT8_MAX);
10777 RTUINT64U NewRsp;
10778 NewRsp.u = *puNewRsp;
10779 RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, 8);
10780 *puNewRsp = NewRsp.u;
10781 return iemMemMap(pVCpu, (void **)ppvMem, cbMem, X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_R);
10782}
10783
10784
10785/**
10786 * Done with a special stack pop (started by iemMemStackPopBeginSpecial or
10787 * iemMemStackPopContinueSpecial).
10788 *
10789 * The caller will manually commit the rSP.
10790 *
10791 * @returns Strict VBox status code.
10792 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10793 * @param pvMem The pointer returned by
10794 * iemMemStackPopBeginSpecial() or
10795 * iemMemStackPopContinueSpecial().
10796 */
10797IEM_STATIC VBOXSTRICTRC iemMemStackPopDoneSpecial(PVMCPUCC pVCpu, void const *pvMem)
10798{
10799 return iemMemCommitAndUnmap(pVCpu, (void *)pvMem, IEM_ACCESS_STACK_R);
10800}
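
/*
 * Illustrative usage sketch (hypothetical caller): unlike the push commit, the
 * pop helpers leave the final RSP update entirely to the caller:
 *
 *      uint64_t        uNewRsp;
 *      uint32_t const *pu32Frame;
 *      VBOXSTRICTRC rcStrict = iemMemStackPopBeginSpecial(pVCpu, 3 * sizeof(uint32_t),
 *                                                         (void const **)&pu32Frame, &uNewRsp);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      ... consume pu32Frame[0..2] ...
 *      rcStrict = iemMemStackPopDoneSpecial(pVCpu, pu32Frame);
 *      if (rcStrict == VINF_SUCCESS)
 *          pVCpu->cpum.GstCtx.rsp = uNewRsp;
 */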
10801
10802
10803/**
10804 * Fetches a system table byte.
10805 *
10806 * @returns Strict VBox status code.
10807 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10808 * @param pbDst Where to return the byte.
10809 * @param iSegReg The index of the segment register to use for
10810 * this access. The base and limits are checked.
10811 * @param GCPtrMem The address of the guest memory.
10812 */
10813IEM_STATIC VBOXSTRICTRC iemMemFetchSysU8(PVMCPUCC pVCpu, uint8_t *pbDst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10814{
10815 /* The lazy approach for now... */
10816 uint8_t const *pbSrc;
10817 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pbSrc, sizeof(*pbSrc), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10818 if (rc == VINF_SUCCESS)
10819 {
10820 *pbDst = *pbSrc;
10821 rc = iemMemCommitAndUnmap(pVCpu, (void *)pbSrc, IEM_ACCESS_SYS_R);
10822 }
10823 return rc;
10824}
10825
10826
10827/**
10828 * Fetches a system table word.
10829 *
10830 * @returns Strict VBox status code.
10831 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10832 * @param pu16Dst Where to return the word.
10833 * @param iSegReg The index of the segment register to use for
10834 * this access. The base and limits are checked.
10835 * @param GCPtrMem The address of the guest memory.
10836 */
10837IEM_STATIC VBOXSTRICTRC iemMemFetchSysU16(PVMCPUCC pVCpu, uint16_t *pu16Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10838{
10839 /* The lazy approach for now... */
10840 uint16_t const *pu16Src;
10841 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Src, sizeof(*pu16Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10842 if (rc == VINF_SUCCESS)
10843 {
10844 *pu16Dst = *pu16Src;
10845 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu16Src, IEM_ACCESS_SYS_R);
10846 }
10847 return rc;
10848}
10849
10850
10851/**
10852 * Fetches a system table dword.
10853 *
10854 * @returns Strict VBox status code.
10855 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10856 * @param pu32Dst Where to return the dword.
10857 * @param iSegReg The index of the segment register to use for
10858 * this access. The base and limits are checked.
10859 * @param GCPtrMem The address of the guest memory.
10860 */
10861IEM_STATIC VBOXSTRICTRC iemMemFetchSysU32(PVMCPUCC pVCpu, uint32_t *pu32Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10862{
10863 /* The lazy approach for now... */
10864 uint32_t const *pu32Src;
10865 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu32Src, sizeof(*pu32Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10866 if (rc == VINF_SUCCESS)
10867 {
10868 *pu32Dst = *pu32Src;
10869 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu32Src, IEM_ACCESS_SYS_R);
10870 }
10871 return rc;
10872}
10873
10874
10875/**
10876 * Fetches a system table qword.
10877 *
10878 * @returns Strict VBox status code.
10879 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10880 * @param pu64Dst Where to return the qword.
10881 * @param iSegReg The index of the segment register to use for
10882 * this access. The base and limits are checked.
10883 * @param GCPtrMem The address of the guest memory.
10884 */
10885IEM_STATIC VBOXSTRICTRC iemMemFetchSysU64(PVMCPUCC pVCpu, uint64_t *pu64Dst, uint8_t iSegReg, RTGCPTR GCPtrMem)
10886{
10887 /* The lazy approach for now... */
10888 uint64_t const *pu64Src;
10889 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu64Src, sizeof(*pu64Src), iSegReg, GCPtrMem, IEM_ACCESS_SYS_R);
10890 if (rc == VINF_SUCCESS)
10891 {
10892 *pu64Dst = *pu64Src;
10893 rc = iemMemCommitAndUnmap(pVCpu, (void *)pu64Src, IEM_ACCESS_SYS_R);
10894 }
10895 return rc;
10896}
10897
10898
10899/**
10900 * Fetches a descriptor table entry with caller specified error code.
10901 *
10902 * @returns Strict VBox status code.
10903 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10904 * @param pDesc Where to return the descriptor table entry.
10905 * @param uSel The selector which table entry to fetch.
10906 * @param uXcpt The exception to raise on table lookup error.
10907 * @param uErrorCode The error code associated with the exception.
10908 */
10909IEM_STATIC VBOXSTRICTRC
10910iemMemFetchSelDescWithErr(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt, uint16_t uErrorCode)
10911{
10912 AssertPtr(pDesc);
10913 IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_GDTR | CPUMCTX_EXTRN_LDTR);
10914
10915 /** @todo did the 286 require all 8 bytes to be accessible? */
10916 /*
10917 * Get the selector table base and check bounds.
10918 */
10919 RTGCPTR GCPtrBase;
10920 if (uSel & X86_SEL_LDT)
10921 {
10922 if ( !pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present
10923 || (uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.ldtr.u32Limit )
10924 {
10925 Log(("iemMemFetchSelDesc: LDT selector %#x is out of bounds (%3x) or ldtr is NP (%#x)\n",
10926 uSel, pVCpu->cpum.GstCtx.ldtr.u32Limit, pVCpu->cpum.GstCtx.ldtr.Sel));
10927 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10928 uErrorCode, 0);
10929 }
10930
10931 Assert(pVCpu->cpum.GstCtx.ldtr.Attr.n.u1Present);
10932 GCPtrBase = pVCpu->cpum.GstCtx.ldtr.u64Base;
10933 }
10934 else
10935 {
10936 if ((uSel | X86_SEL_RPL_LDT) > pVCpu->cpum.GstCtx.gdtr.cbGdt)
10937 {
10938 Log(("iemMemFetchSelDesc: GDT selector %#x is out of bounds (%3x)\n", uSel, pVCpu->cpum.GstCtx.gdtr.cbGdt));
10939 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR,
10940 uErrorCode, 0);
10941 }
10942 GCPtrBase = pVCpu->cpum.GstCtx.gdtr.pGdt;
10943 }
10944
10945 /*
10946 * Read the legacy descriptor and maybe the long mode extensions if
10947 * required.
10948 */
10949 VBOXSTRICTRC rcStrict;
10950 if (IEM_GET_TARGET_CPU(pVCpu) > IEMTARGETCPU_286)
10951 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Legacy.u, UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK));
10952 else
10953 {
10954 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[0], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 0);
10955 if (rcStrict == VINF_SUCCESS)
10956 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[1], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 2);
10957 if (rcStrict == VINF_SUCCESS)
10958 rcStrict = iemMemFetchSysU16(pVCpu, &pDesc->Legacy.au16[2], UINT8_MAX, GCPtrBase + (uSel & X86_SEL_MASK) + 4);
10959 if (rcStrict == VINF_SUCCESS)
10960 pDesc->Legacy.au16[3] = 0;
10961 else
10962 return rcStrict;
10963 }
10964
10965 if (rcStrict == VINF_SUCCESS)
10966 {
10967 if ( !IEM_IS_LONG_MODE(pVCpu)
10968 || pDesc->Legacy.Gen.u1DescType)
10969 pDesc->Long.au64[1] = 0;
10970 else if ((uint32_t)(uSel | X86_SEL_RPL_LDT) + 8 <= (uSel & X86_SEL_LDT ? pVCpu->cpum.GstCtx.ldtr.u32Limit : pVCpu->cpum.GstCtx.gdtr.cbGdt))
10971 rcStrict = iemMemFetchSysU64(pVCpu, &pDesc->Long.au64[1], UINT8_MAX, GCPtrBase + (uSel | X86_SEL_RPL_LDT) + 1);
10972 else
10973 {
10974 Log(("iemMemFetchSelDesc: system selector %#x is out of bounds\n", uSel));
10975 /** @todo is this the right exception? */
10976 return iemRaiseXcptOrInt(pVCpu, 0, uXcpt, IEM_XCPT_FLAGS_T_CPU_XCPT | IEM_XCPT_FLAGS_ERR, uErrorCode, 0);
10977 }
10978 }
10979 return rcStrict;
10980}
10981
10982
10983/**
10984 * Fetches a descriptor table entry.
10985 *
10986 * @returns Strict VBox status code.
10987 * @param pVCpu The cross context virtual CPU structure of the calling thread.
10988 * @param pDesc Where to return the descriptor table entry.
10989 * @param uSel The selector which table entry to fetch.
10990 * @param uXcpt The exception to raise on table lookup error.
10991 */
10992IEM_STATIC VBOXSTRICTRC iemMemFetchSelDesc(PVMCPUCC pVCpu, PIEMSELDESC pDesc, uint16_t uSel, uint8_t uXcpt)
10993{
10994 return iemMemFetchSelDescWithErr(pVCpu, pDesc, uSel, uXcpt, uSel & X86_SEL_MASK_OFF_RPL);
10995}
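
/*
 * Illustrative usage sketch (hypothetical caller): a typical selector check
 * fetches the descriptor and then validates the fields it cares about itself,
 * raising whatever exception the instruction calls for:
 *
 *      IEMSELDESC   Desc;
 *      VBOXSTRICTRC rcStrict = iemMemFetchSelDesc(pVCpu, &Desc, uSel, X86_XCPT_GP);
 *      if (rcStrict != VINF_SUCCESS)
 *          return rcStrict;
 *      if (!Desc.Legacy.Gen.u1Present)
 *          ... raise the not-present variant appropriate to the caller ...
 */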
10996
10997
10998/**
10999 * Fakes a long mode stack selector for SS = 0.
11000 *
11001 * @param pDescSs Where to return the fake stack descriptor.
11002 * @param uDpl The DPL we want.
11003 */
11004IEM_STATIC void iemMemFakeStackSelDesc(PIEMSELDESC pDescSs, uint32_t uDpl)
11005{
11006 pDescSs->Long.au64[0] = 0;
11007 pDescSs->Long.au64[1] = 0;
11008 pDescSs->Long.Gen.u4Type = X86_SEL_TYPE_RW_ACC;
11009 pDescSs->Long.Gen.u1DescType = 1; /* 1 = code / data, 0 = system. */
11010 pDescSs->Long.Gen.u2Dpl = uDpl;
11011 pDescSs->Long.Gen.u1Present = 1;
11012 pDescSs->Long.Gen.u1Long = 1;
11013}
11014
11015
11016/**
11017 * Marks the selector descriptor as accessed (only non-system descriptors).
11018 *
11019 * This function ASSUMES that iemMemFetchSelDesc has been called previously and
11020 * will therefore skip the limit checks.
11021 *
11022 * @returns Strict VBox status code.
11023 * @param pVCpu The cross context virtual CPU structure of the calling thread.
11024 * @param uSel The selector.
11025 */
11026IEM_STATIC VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel)
11027{
11028 /*
11029 * Get the selector table base and calculate the entry address.
11030 */
11031 RTGCPTR GCPtr = uSel & X86_SEL_LDT
11032 ? pVCpu->cpum.GstCtx.ldtr.u64Base
11033 : pVCpu->cpum.GstCtx.gdtr.pGdt;
11034 GCPtr += uSel & X86_SEL_MASK;
11035
11036 /*
11037 * ASMAtomicBitSet will assert if the address is misaligned, so do some
11038     * ugly stuff to avoid this. This will make sure the access is atomic and,
11039     * more or less, removes any question about 8-bit vs 32-bit accesses.
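     *
     * (The accessed bit is bit 0 of the type field, i.e. bit 40 of the 8 byte
     * descriptor.  In the aligned case we map bytes 4 thru 7, so the bit to set
     * is 40 - 32 = 8; in the misaligned case the byte adjustments below re-align
     * the pointer, so the bit index is 40 minus the number of bits skipped.)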
11040 */
11041 VBOXSTRICTRC rcStrict;
11042 uint32_t volatile *pu32;
11043 if ((GCPtr & 3) == 0)
11044 {
11045        /* The normal case, map the 32 bits around the accessed bit (bit 40). */
11046 GCPtr += 2 + 2;
11047 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 4, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11048 if (rcStrict != VINF_SUCCESS)
11049 return rcStrict;
11050        ASMAtomicBitSet(pu32, 8); /* X86_SEL_TYPE_ACCESSED is 1, but it is preceded by u8BaseHigh1. */
11051 }
11052 else
11053 {
11054 /* The misaligned GDT/LDT case, map the whole thing. */
11055 rcStrict = iemMemMap(pVCpu, (void **)&pu32, 8, UINT8_MAX, GCPtr, IEM_ACCESS_SYS_RW);
11056 if (rcStrict != VINF_SUCCESS)
11057 return rcStrict;
11058 switch ((uintptr_t)pu32 & 3)
11059 {
11060 case 0: ASMAtomicBitSet(pu32, 40 + 0 - 0); break;
11061 case 1: ASMAtomicBitSet((uint8_t volatile *)pu32 + 3, 40 + 0 - 24); break;
11062 case 2: ASMAtomicBitSet((uint8_t volatile *)pu32 + 2, 40 + 0 - 16); break;
11063 case 3: ASMAtomicBitSet((uint8_t volatile *)pu32 + 1, 40 + 0 - 8); break;
11064 }
11065 }
11066
11067 return iemMemCommitAndUnmap(pVCpu, (void *)pu32, IEM_ACCESS_SYS_RW);
11068}
11069
11070/** @} */
11071
11072
11073/*
11074 * Include the C/C++ implementation of instruction.
11075 */
11076#include "IEMAllCImpl.cpp.h"
11077
11078
11079
11080/** @name "Microcode" macros.
11081 *
11082 * The idea is that we should be able to use the same code both to interpret
11083 * instructions and to feed a recompiler. Thus this obfuscation.
11084 *
11085 * @{
11086 */
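
/*
 * Illustrative sketch (hypothetical instruction body): a register-to-register
 * move decoded elsewhere would typically be emitted as a block along the
 * lines of
 *
 *      IEM_MC_BEGIN(0, 1);
 *      IEM_MC_LOCAL(uint16_t, u16Value);
 *      IEM_MC_FETCH_GREG_U16(u16Value, X86_GREG_xCX);
 *      IEM_MC_STORE_GREG_U16(X86_GREG_xAX, u16Value);
 *      IEM_MC_ADVANCE_RIP();
 *      IEM_MC_END();
 *
 * which the macros below expand into plain C for the interpreter, while a
 * recompiler could give the same block a different expansion.
 */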
11087#define IEM_MC_BEGIN(a_cArgs, a_cLocals) {
11088#define IEM_MC_END() }
11089#define IEM_MC_PAUSE() do {} while (0)
11090#define IEM_MC_CONTINUE() do {} while (0)
11091
11092/** Internal macro. */
11093#define IEM_MC_RETURN_ON_FAILURE(a_Expr) \
11094 do \
11095 { \
11096 VBOXSTRICTRC rcStrict2 = a_Expr; \
11097 if (rcStrict2 != VINF_SUCCESS) \
11098 return rcStrict2; \
11099 } while (0)
11100
11101
11102#define IEM_MC_ADVANCE_RIP() iemRegUpdateRipAndClearRF(pVCpu)
11103#define IEM_MC_REL_JMP_S8(a_i8) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS8(pVCpu, a_i8))
11104#define IEM_MC_REL_JMP_S16(a_i16) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS16(pVCpu, a_i16))
11105#define IEM_MC_REL_JMP_S32(a_i32) IEM_MC_RETURN_ON_FAILURE(iemRegRipRelativeJumpS32(pVCpu, a_i32))
11106#define IEM_MC_SET_RIP_U16(a_u16NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u16NewIP)))
11107#define IEM_MC_SET_RIP_U32(a_u32NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u32NewIP)))
11108#define IEM_MC_SET_RIP_U64(a_u64NewIP) IEM_MC_RETURN_ON_FAILURE(iemRegRipJump((pVCpu), (a_u64NewIP)))
11109#define IEM_MC_RAISE_DIVIDE_ERROR() return iemRaiseDivideError(pVCpu)
11110#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() \
11111 do { \
11112 if (pVCpu->cpum.GstCtx.cr0 & (X86_CR0_EM | X86_CR0_TS)) \
11113 return iemRaiseDeviceNotAvailable(pVCpu); \
11114 } while (0)
11115#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() \
11116 do { \
11117 if ((pVCpu->cpum.GstCtx.cr0 & (X86_CR0_MP | X86_CR0_TS)) == (X86_CR0_MP | X86_CR0_TS)) \
11118 return iemRaiseDeviceNotAvailable(pVCpu); \
11119 } while (0)
11120#define IEM_MC_MAYBE_RAISE_FPU_XCPT() \
11121 do { \
11122 if (pVCpu->cpum.GstCtx.XState.x87.FSW & X86_FSW_ES) \
11123 return iemRaiseMathFault(pVCpu); \
11124 } while (0)
11125#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() \
11126 do { \
11127 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11128 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11129 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx2) \
11130 return iemRaiseUndefinedOpcode(pVCpu); \
11131 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11132 return iemRaiseDeviceNotAvailable(pVCpu); \
11133 } while (0)
11134#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() \
11135 do { \
11136 if ( (pVCpu->cpum.GstCtx.aXcr[0] & (XSAVE_C_YMM | XSAVE_C_SSE)) != (XSAVE_C_YMM | XSAVE_C_SSE) \
11137 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSXSAVE) \
11138 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAvx) \
11139 return iemRaiseUndefinedOpcode(pVCpu); \
11140 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11141 return iemRaiseDeviceNotAvailable(pVCpu); \
11142 } while (0)
11143#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() \
11144 do { \
11145 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11146 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11147 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse41) \
11148 return iemRaiseUndefinedOpcode(pVCpu); \
11149 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11150 return iemRaiseDeviceNotAvailable(pVCpu); \
11151 } while (0)
11152#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() \
11153 do { \
11154 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11155 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11156 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse3) \
11157 return iemRaiseUndefinedOpcode(pVCpu); \
11158 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11159 return iemRaiseDeviceNotAvailable(pVCpu); \
11160 } while (0)
11161#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() \
11162 do { \
11163 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11164 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11165 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) \
11166 return iemRaiseUndefinedOpcode(pVCpu); \
11167 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11168 return iemRaiseDeviceNotAvailable(pVCpu); \
11169 } while (0)
11170#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() \
11171 do { \
11172 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11173 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_OSFXSR) \
11174 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse) \
11175 return iemRaiseUndefinedOpcode(pVCpu); \
11176 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11177 return iemRaiseDeviceNotAvailable(pVCpu); \
11178 } while (0)
11179#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() \
11180 do { \
11181 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11182 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fMmx) \
11183 return iemRaiseUndefinedOpcode(pVCpu); \
11184 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11185 return iemRaiseDeviceNotAvailable(pVCpu); \
11186 } while (0)
11187#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() \
11188 do { \
11189 if ( (pVCpu->cpum.GstCtx.cr0 & X86_CR0_EM) \
11190 || ( !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse \
11191 && !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fAmdMmxExts) ) \
11192 return iemRaiseUndefinedOpcode(pVCpu); \
11193 if (pVCpu->cpum.GstCtx.cr0 & X86_CR0_TS) \
11194 return iemRaiseDeviceNotAvailable(pVCpu); \
11195 } while (0)
11196#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() \
11197 do { \
11198 if (pVCpu->iem.s.uCpl != 0) \
11199 return iemRaiseGeneralProtectionFault0(pVCpu); \
11200 } while (0)
11201#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
11202 do { \
11203 if (!((a_EffAddr) & ((a_cbAlign) - 1))) { /* likely */ } \
11204 else return iemRaiseGeneralProtectionFault0(pVCpu); \
11205 } while (0)
11206#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() \
11207 do { \
11208 if ( pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT \
11209 || !IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fFsGsBase \
11210 || !(pVCpu->cpum.GstCtx.cr4 & X86_CR4_FSGSBASE)) \
11211 return iemRaiseUndefinedOpcode(pVCpu); \
11212 } while (0)
11213#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) \
11214 do { \
11215 if (!IEM_IS_CANONICAL(a_u64Addr)) \
11216 return iemRaiseGeneralProtectionFault0(pVCpu); \
11217 } while (0)
11218
11219
11220#define IEM_MC_LOCAL(a_Type, a_Name) a_Type a_Name
11221#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) a_Type const a_Name = (a_Value)
11222#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (a_pRefArg) = &(a_Local)
11223#define IEM_MC_ARG(a_Type, a_Name, a_iArg) a_Type a_Name
11224#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) a_Type const a_Name = (a_Value)
11225#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) a_Type const a_Name = &(a_Local)
11226#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) \
11227 uint32_t a_Name; \
11228 uint32_t *a_pName = &a_Name
11229#define IEM_MC_COMMIT_EFLAGS(a_EFlags) \
11230 do { pVCpu->cpum.GstCtx.eflags.u = (a_EFlags); Assert(pVCpu->cpum.GstCtx.eflags.u & X86_EFL_1); } while (0)
11231
11232#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) (a_VarOrArg) = (a_CVariableOrConst)
11233#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
11234
11235#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) (a_u8Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11236#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11237#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11238#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU8(pVCpu, (a_iGReg))
11239#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) (a_u16Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11240#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11241#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int8_t)iemGRegFetchU8(pVCpu, (a_iGReg))
11242#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) (a_u16Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11243#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11244#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU16(pVCpu, (a_iGReg))
11245#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) (a_u32Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11246#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int16_t)iemGRegFetchU16(pVCpu, (a_iGReg))
11247#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) (a_u32Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11248#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU32(pVCpu, (a_iGReg))
11249#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) (a_u64Dst) = (int32_t)iemGRegFetchU32(pVCpu, (a_iGReg))
11250#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) (a_u64Dst) = iemGRegFetchU64(pVCpu, (a_iGReg))
11251#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
11252#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { \
11253 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11254 (a_u16Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11255 } while (0)
11256#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { \
11257 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11258 (a_u32Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11259 } while (0)
11260#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { \
11261 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11262 (a_u64Dst) = iemSRegFetchU16(pVCpu, (a_iSReg)); \
11263 } while (0)
11264/** @todo IEM_MC_FETCH_SREG_BASE_U64 & IEM_MC_FETCH_SREG_BASE_U32 probably aren't worth it... */
11265#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { \
11266 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11267 (a_u64Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11268 } while (0)
11269#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { \
11270 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11271 (a_u32Dst) = iemSRegBaseFetchU64(pVCpu, (a_iSReg)); \
11272 } while (0)
11273/** @note Not for IOPL or IF testing or modification. */
11274#define IEM_MC_FETCH_EFLAGS(a_EFlags) (a_EFlags) = pVCpu->cpum.GstCtx.eflags.u
11275#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) (a_EFlags) = (uint8_t)pVCpu->cpum.GstCtx.eflags.u
11276#define IEM_MC_FETCH_FSW(a_u16Fsw) (a_u16Fsw) = pVCpu->cpum.GstCtx.XState.x87.FSW
11277#define IEM_MC_FETCH_FCW(a_u16Fcw) (a_u16Fcw) = pVCpu->cpum.GstCtx.XState.x87.FCW
11278
11279#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) = (a_u8Value)
11280#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) = (a_u16Value)
11281#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (uint32_t)(a_u32Value) /* clear high bits. */
11282#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) = (a_u64Value)
11283#define IEM_MC_STORE_GREG_U8_CONST IEM_MC_STORE_GREG_U8
11284#define IEM_MC_STORE_GREG_U16_CONST IEM_MC_STORE_GREG_U16
11285#define IEM_MC_STORE_GREG_U32_CONST IEM_MC_STORE_GREG_U32
11286#define IEM_MC_STORE_GREG_U64_CONST IEM_MC_STORE_GREG_U64
11287#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) *iemGRegRefU64(pVCpu, (a_iGReg)) &= UINT32_MAX
11288#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { (a_pu32Dst)[1] = 0; } while (0)
11289/** @todo IEM_MC_STORE_SREG_BASE_U64 & IEM_MC_STORE_SREG_BASE_U32 aren't worth it... */
11290#define IEM_MC_STORE_SREG_BASE_U64(a_iSReg, a_u64Value) do { \
11291 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11292 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (a_u64Value); \
11293 } while (0)
11294#define IEM_MC_STORE_SREG_BASE_U32(a_iSReg, a_u32Value) do { \
11295 IEM_CTX_IMPORT_NORET(pVCpu, CPUMCTX_EXTRN_SREG_FROM_IDX(a_iSReg)); \
11296 *iemSRegBaseRefU64(pVCpu, (a_iSReg)) = (uint32_t)(a_u32Value); /* clear high bits. */ \
11297 } while (0)
11298#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) \
11299 do { pVCpu->cpum.GstCtx.XState.x87.aRegs[a_iSt].r80 = *(a_pr80Src); } while (0)
11300
11301
11302#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) (a_pu8Dst) = iemGRegRefU8( pVCpu, (a_iGReg))
11303#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) (a_pu16Dst) = iemGRegRefU16(pVCpu, (a_iGReg))
11304/** @todo User of IEM_MC_REF_GREG_U32 needs to clear the high bits on commit.
11305 * Use IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF! */
11306#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) (a_pu32Dst) = iemGRegRefU32(pVCpu, (a_iGReg))
11307#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) (a_pu64Dst) = iemGRegRefU64(pVCpu, (a_iGReg))
11308/** @note Not for IOPL or IF testing or modification. */
11309#define IEM_MC_REF_EFLAGS(a_pEFlags) (a_pEFlags) = &pVCpu->cpum.GstCtx.eflags.u
11310
11311#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) += (a_u8Value)
11312#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) += (a_u16Value)
11313#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) \
11314 do { \
11315 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11316 *pu32Reg += (a_u32Value); \
11317        pu32Reg[1] = 0; /* implicitly clear the high dword. */
11318 } while (0)
11319#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) += (a_u64Value)
11320
11321#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) -= (a_u8Value)
11322#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) -= (a_u16Value)
11323#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) \
11324 do { \
11325 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11326 *pu32Reg -= (a_u32Value); \
11327        pu32Reg[1] = 0; /* implicitly clear the high dword. */
11328 } while (0)
11329#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) -= (a_u64Value)
11330#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { (a_u16Value) -= a_u16Const; } while (0)
11331
11332#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += iemGRegFetchU8( pVCpu, (a_iGReg)); } while (0)
11333#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += iemGRegFetchU16(pVCpu, (a_iGReg)); } while (0)
11334#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += iemGRegFetchU32(pVCpu, (a_iGReg)); } while (0)
11335#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += iemGRegFetchU64(pVCpu, (a_iGReg)); } while (0)
11336#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); } while (0)
11337#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); } while (0)
11338#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); } while (0)
11339
11340#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); } while (0)
11341#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); } while (0)
11342#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11343#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); } while (0)
11344
11345#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); } while (0)
11346#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); } while (0)
11347#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); } while (0)
11348
11349#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); } while (0)
11350#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); } while (0)
11351#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11352
11353#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); } while (0)
11354#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); } while (0)
11355#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); } while (0)
11356
11357#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); } while (0)
11358#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); } while (0)
11359#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); } while (0)
11360
11361#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); } while (0)
11362
11363#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); } while (0)
11364
11365#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) &= (a_u8Value)
11366#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) &= (a_u16Value)
11367#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) \
11368 do { \
11369 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11370 *pu32Reg &= (a_u32Value); \
11371        pu32Reg[1] = 0; /* implicitly clear the high dword. */
11372 } while (0)
11373#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) &= (a_u64Value)
11374
11375#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) *iemGRegRefU8( pVCpu, (a_iGReg)) |= (a_u8Value)
11376#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) *iemGRegRefU16(pVCpu, (a_iGReg)) |= (a_u16Value)
11377#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) \
11378 do { \
11379 uint32_t *pu32Reg = iemGRegRefU32(pVCpu, (a_iGReg)); \
11380 *pu32Reg |= (a_u32Value); \
11381        pu32Reg[1] = 0; /* implicitly clear the high dword. */
11382 } while (0)
11383#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) *iemGRegRefU64(pVCpu, (a_iGReg)) |= (a_u64Value)
11384
11385
11386/** @note Not for IOPL or IF modification. */
11387#define IEM_MC_SET_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u |= (a_fBit); } while (0)
11388/** @note Not for IOPL or IF modification. */
11389#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u &= ~(a_fBit); } while (0)
11390/** @note Not for IOPL or IF modification. */
11391#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { pVCpu->cpum.GstCtx.eflags.u ^= (a_fBit); } while (0)
11392
11393#define IEM_MC_CLEAR_FSW_EX() do { pVCpu->cpum.GstCtx.XState.x87.FSW &= X86_FSW_C_MASK | X86_FSW_TOP_MASK; } while (0)
11394
11395/** Switches the FPU state to MMX mode (FSW.TOS=0, FTW=0, i.e. abridged FTW=0xff) if necessary. */
11396#define IEM_MC_FPU_TO_MMX_MODE() do { \
11397 pVCpu->cpum.GstCtx.XState.x87.FSW &= ~X86_FSW_TOP_MASK; \
11398 pVCpu->cpum.GstCtx.XState.x87.FTW = 0xff; \
11399 } while (0)
11400
11401/** Switches the FPU state from MMX mode (FTW=0xffff, i.e. abridged FTW=0). */
11402#define IEM_MC_FPU_FROM_MMX_MODE() do { \
11403 pVCpu->cpum.GstCtx.XState.x87.FTW = 0; \
11404 } while (0)
11405
11406#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) \
11407 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx; } while (0)
11408#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) \
11409 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[0]; } while (0)
11410#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { \
11411 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (a_u64Value); \
11412 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11413 } while (0)
11414#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { \
11415 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx = (uint32_t)(a_u32Value); \
11416 pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].au32[2] = 0xffff; \
11417 } while (0)
11418#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) /** @todo need to set high word to 0xffff on commit (see IEM_MC_STORE_MREG_U64) */ \
11419 (a_pu64Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11420#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) \
11421 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11422#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) \
11423 (a_pu32Dst) = ((uint32_t const *)&pVCpu->cpum.GstCtx.XState.x87.aRegs[(a_iMReg)].mmx)
11424
11425#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) \
11426 do { (a_u128Value).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; \
11427 (a_u128Value).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; \
11428 } while (0)
11429#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) \
11430 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0]; } while (0)
11431#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) \
11432 do { (a_u32Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0]; } while (0)
11433#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) \
11434 do { (a_u64Value) = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1]; } while (0)
11435#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) \
11436 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u128Value).au64[0]; \
11437 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u128Value).au64[1]; \
11438 } while (0)
11439#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) \
11440 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); } while (0)
11441#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) \
11442 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (a_u64Value); \
11443 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
11444 } while (0)
11445#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) \
11446 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au32[0] = (a_u32Value); } while (0)
11447#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) \
11448 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0] = (uint32_t)(a_u32Value); \
11449 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = 0; \
11450 } while (0)
11451#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) \
11452 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[1] = (a_u64Value); } while (0)
11453#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) \
11454 (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
11455#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) \
11456 (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].uXmm)
11457#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) \
11458 (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXReg)].au64[0])
11459#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) \
11460 do { pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[0] \
11461 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[0]; \
11462 pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegDst)].au64[1] \
11463 = pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iXRegSrc)].au64[1]; \
11464 } while (0)
11465
11466#define IEM_MC_FETCH_YREG_U32(a_u32Dst, a_iYRegSrc) \
11467 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11468 (a_u32Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au32[0]; \
11469 } while (0)
11470#define IEM_MC_FETCH_YREG_U64(a_u64Dst, a_iYRegSrc) \
11471 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11472 (a_u64Dst) = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11473 } while (0)
11474#define IEM_MC_FETCH_YREG_U128(a_u128Dst, a_iYRegSrc) \
11475 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11476 (a_u128Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11477 (a_u128Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11478 } while (0)
11479#define IEM_MC_FETCH_YREG_U256(a_u256Dst, a_iYRegSrc) \
11480 do { uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11481 (a_u256Dst).au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11482 (a_u256Dst).au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11483 (a_u256Dst).au64[2] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11484 (a_u256Dst).au64[3] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11485 } while (0)
11486
11487#define IEM_MC_INT_CLEAR_ZMM_256_UP(a_iXRegDst) do { /* For AVX512 and AVX1024 support. */ } while (0)
11488#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Src) \
11489 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11490 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = (a_u32Src); \
11491 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = 0; \
11492 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11493 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11494 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11495 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11496 } while (0)
11497#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Src) \
11498 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11499 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Src); \
11500 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11501 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11502 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11503 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11504 } while (0)
11505#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Src) \
11506 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11507 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u128Src).au64[0]; \
11508 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u128Src).au64[1]; \
11509 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11510 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11511 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11512 } while (0)
11513#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Src) \
11514 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11515 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u256Src).au64[0]; \
11516 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = (a_u256Src).au64[1]; \
11517 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = (a_u256Src).au64[2]; \
11518 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = (a_u256Src).au64[3]; \
11519 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11520 } while (0)
11521
11522#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) \
11523    (a_pu128Dst) = (&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iYReg)].uXmm)
11524#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) \
11525    (a_pu128Dst) = ((PCRTUINT128U)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iYReg)].uXmm)
11526#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) \
11527    (a_pu64Dst) = ((uint64_t const *)&pVCpu->cpum.GstCtx.XState.x87.aXMM[(a_iYReg)].au64[0])
11528#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) \
11529 do { uintptr_t const iYRegTmp = (a_iYReg); \
11530 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[0] = 0; \
11531 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegTmp].au64[1] = 0; \
11532 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegTmp); \
11533 } while (0)
11534
11535#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11536 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11537 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11538 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11539 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11540 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[0]; \
11541 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegSrcTmp].au64[1]; \
11542 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11543 } while (0)
11544#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11545 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11546 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11547 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11548 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[1]; \
11549 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11550 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11551 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11552 } while (0)
11553#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) \
11554 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11555 uintptr_t const iYRegSrcTmp = (a_iYRegSrc); \
11556 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcTmp].au64[0]; \
11557 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = 0; \
11558 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11559 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11560 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11561 } while (0)
11562
11563#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) \
11564 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11565 uintptr_t const iYRegSrc32Tmp = (a_iYRegSrc32); \
11566 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11567 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc32Tmp].au32[0]; \
11568 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au32[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au32[1]; \
11569 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11570 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11571 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11572 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11573 } while (0)
11574#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) \
11575 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11576 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11577 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11578 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[0]; \
11579 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11580 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11581 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11582 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11583 } while (0)
11584#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) /* for vmovhlps */ \
11585 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11586 uintptr_t const iYRegSrc64Tmp = (a_iYRegSrc64); \
11587 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11588 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrc64Tmp].au64[1]; \
11589 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11590 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11591 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11592 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11593 } while (0)
11594#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) \
11595 do { uintptr_t const iYRegDstTmp = (a_iYRegDst); \
11596 uintptr_t const iYRegSrcHxTmp = (a_iYRegSrcHx); \
11597 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[0] = (a_u64Local); \
11598 pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegDstTmp].au64[1] = pVCpu->cpum.GstCtx.XState.x87.aXMM[iYRegSrcHxTmp].au64[1]; \
11599 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[0] = 0; \
11600 pVCpu->cpum.GstCtx.XState.u.YmmHi.aYmmHi[iYRegDstTmp].au64[1] = 0; \
11601 IEM_MC_INT_CLEAR_ZMM_256_UP(iYRegDstTmp); \
11602 } while (0)
11603
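/*
 * The memory access wrappers below come in two flavours: without
 * IEM_WITH_SETJMP they expand to statements propagating a strict status code
 * via IEM_MC_RETURN_ON_FAILURE, while with IEM_WITH_SETJMP they expand to
 * expressions calling the *Jmp accessors, which bail out via longjmp on
 * failure instead of returning a status code.
 */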
11604#ifndef IEM_WITH_SETJMP
11605# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11606 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem)))
11607# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11608 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem16)))
11609# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11610 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &(a_u8Dst), (a_iSeg), (a_GCPtrMem32)))
11611#else
11612# define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) \
11613 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11614# define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) \
11615 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem16)))
11616# define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) \
11617 ((a_u8Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem32)))
11618#endif
11619
11620#ifndef IEM_WITH_SETJMP
11621# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11622 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem)))
11623# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11624 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &(a_u16Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11625# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11626 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, (uint16_t *)&(a_i16Dst), (a_iSeg), (a_GCPtrMem)))
11627#else
11628# define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11629 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11630# define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11631 ((a_u16Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11632# define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) \
11633 ((a_i16Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11634#endif
11635
11636#ifndef IEM_WITH_SETJMP
11637# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11638 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem)))
11639# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11640 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_u32Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11641# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11642 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, (uint32_t *)&(a_i32Dst), (a_iSeg), (a_GCPtrMem)))
11643#else
11644# define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11645 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11646# define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11647 ((a_u32Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11648# define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) \
11649 ((a_i32Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11650#endif
11651
11652#ifdef SOME_UNUSED_FUNCTION
11653# define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11654 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataS32SxU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11655#endif
11656
11657#ifndef IEM_WITH_SETJMP
11658# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11659 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11660# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11661 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11662# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11663 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64AlignedU128(pVCpu, &(a_u64Dst), (a_iSeg), (a_GCPtrMem)))
11664# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11665 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, (uint64_t *)&(a_i64Dst), (a_iSeg), (a_GCPtrMem)))
11666#else
11667# define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11668 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11669# define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
11670 ((a_u64Dst) = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem) + (a_offDisp)))
11671# define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) \
11672 ((a_u64Dst) = iemMemFetchDataU64AlignedU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11673# define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) \
11674 ((a_i64Dst) = (int64_t)iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11675#endif
11676
11677#ifndef IEM_WITH_SETJMP
11678# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11679 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &(a_r32Dst).u32, (a_iSeg), (a_GCPtrMem)))
11680# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11681 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU64(pVCpu, &(a_r64Dst).au64[0], (a_iSeg), (a_GCPtrMem)))
11682# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11683 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataR80(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem)))
11684#else
11685# define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) \
11686 ((a_r32Dst).u32 = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11687# define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) \
11688 ((a_r64Dst).au64[0] = iemMemFetchDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11689# define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) \
11690 iemMemFetchDataR80Jmp(pVCpu, &(a_r80Dst), (a_iSeg), (a_GCPtrMem))
11691#endif
11692
11693#ifndef IEM_WITH_SETJMP
11694# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11695 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11696# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11697 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU128AlignedSse(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem)))
11698#else
11699# define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) \
11700 iemMemFetchDataU128Jmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11701# define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) \
11702 iemMemFetchDataU128AlignedSseJmp(pVCpu, &(a_u128Dst), (a_iSeg), (a_GCPtrMem))
11703#endif
11704
11705#ifndef IEM_WITH_SETJMP
11706# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11707 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11708# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11709 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU256AlignedSse(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem)))
11710#else
11711# define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) \
11712 iemMemFetchDataU256Jmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11713# define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) \
11714 iemMemFetchDataU256AlignedSseJmp(pVCpu, &(a_u256Dst), (a_iSeg), (a_GCPtrMem))
11715#endif
11716
11717
11718
11719#ifndef IEM_WITH_SETJMP
11720# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11721 do { \
11722 uint8_t u8Tmp; \
11723 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11724 (a_u16Dst) = u8Tmp; \
11725 } while (0)
11726# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11727 do { \
11728 uint8_t u8Tmp; \
11729 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11730 (a_u32Dst) = u8Tmp; \
11731 } while (0)
11732# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11733 do { \
11734 uint8_t u8Tmp; \
11735 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11736 (a_u64Dst) = u8Tmp; \
11737 } while (0)
11738# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11739 do { \
11740 uint16_t u16Tmp; \
11741 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11742 (a_u32Dst) = u16Tmp; \
11743 } while (0)
11744# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11745 do { \
11746 uint16_t u16Tmp; \
11747 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11748 (a_u64Dst) = u16Tmp; \
11749 } while (0)
11750# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11751 do { \
11752 uint32_t u32Tmp; \
11753 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11754 (a_u64Dst) = u32Tmp; \
11755 } while (0)
11756#else /* IEM_WITH_SETJMP */
11757# define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11758 ((a_u16Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11759# define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11760 ((a_u32Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11761# define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11762 ((a_u64Dst) = iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11763# define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11764 ((a_u32Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11765# define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11766 ((a_u64Dst) = iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11767# define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11768 ((a_u64Dst) = iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11769#endif /* IEM_WITH_SETJMP */
11770
11771#ifndef IEM_WITH_SETJMP
11772# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11773 do { \
11774 uint8_t u8Tmp; \
11775 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11776 (a_u16Dst) = (int8_t)u8Tmp; \
11777 } while (0)
11778# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11779 do { \
11780 uint8_t u8Tmp; \
11781 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11782 (a_u32Dst) = (int8_t)u8Tmp; \
11783 } while (0)
11784# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11785 do { \
11786 uint8_t u8Tmp; \
11787 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU8(pVCpu, &u8Tmp, (a_iSeg), (a_GCPtrMem))); \
11788 (a_u64Dst) = (int8_t)u8Tmp; \
11789 } while (0)
11790# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11791 do { \
11792 uint16_t u16Tmp; \
11793 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11794 (a_u32Dst) = (int16_t)u16Tmp; \
11795 } while (0)
11796# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11797 do { \
11798 uint16_t u16Tmp; \
11799 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU16(pVCpu, &u16Tmp, (a_iSeg), (a_GCPtrMem))); \
11800 (a_u64Dst) = (int16_t)u16Tmp; \
11801 } while (0)
11802# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11803 do { \
11804 uint32_t u32Tmp; \
11805 IEM_MC_RETURN_ON_FAILURE(iemMemFetchDataU32(pVCpu, &u32Tmp, (a_iSeg), (a_GCPtrMem))); \
11806 (a_u64Dst) = (int32_t)u32Tmp; \
11807 } while (0)
11808#else /* IEM_WITH_SETJMP */
11809# define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) \
11810 ((a_u16Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11811# define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11812 ((a_u32Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11813# define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11814 ((a_u64Dst) = (int8_t)iemMemFetchDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11815# define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) \
11816 ((a_u32Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11817# define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11818 ((a_u64Dst) = (int16_t)iemMemFetchDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11819# define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) \
11820 ((a_u64Dst) = (int32_t)iemMemFetchDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem)))
11821#endif /* IEM_WITH_SETJMP */
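/* Note on the two families above (illustrative): the _ZX_ fetch macros model
 * zero extension, e.g. a movzx style load of a byte into a 32-bit destination
 * would use IEM_MC_FETCH_MEM_U8_ZX_U32, while the _SX_ macros model sign
 * extension, e.g. a movsx style load would use IEM_MC_FETCH_MEM_U8_SX_U32,
 * where the cast to int8_t performs the sign extension before widening. */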
11822
11823#ifndef IEM_WITH_SETJMP
11824# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11825 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value)))
11826# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11827 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value)))
11828# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11829 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value)))
11830# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11831 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value)))
11832#else
11833# define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) \
11834 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8Value))
11835# define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) \
11836 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16Value))
11837# define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) \
11838 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32Value))
11839# define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) \
11840 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64Value))
11841#endif
11842
11843#ifndef IEM_WITH_SETJMP
11844# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11845 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU8(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C)))
11846# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11847 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU16(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C)))
11848# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11849 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU32(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C)))
11850# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11851 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU64(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C)))
11852#else
11853# define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) \
11854 iemMemStoreDataU8Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u8C))
11855# define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) \
11856 iemMemStoreDataU16Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u16C))
11857# define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) \
11858 iemMemStoreDataU32Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u32C))
11859# define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) \
11860 iemMemStoreDataU64Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u64C))
11861#endif
11862
11863#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) *(a_pi8Dst) = (a_i8C)
11864#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) *(a_pi16Dst) = (a_i16C)
11865#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) *(a_pi32Dst) = (a_i32C)
11866#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) *(a_pi64Dst) = (a_i64C)
11867#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) (a_pr32Dst)->u32 = UINT32_C(0xffc00000)
11868#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) (a_pr64Dst)->au64[0] = UINT64_C(0xfff8000000000000)
11869#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) \
11870 do { \
11871 (a_pr80Dst)->au64[0] = UINT64_C(0xc000000000000000); \
11872 (a_pr80Dst)->au16[4] = UINT16_C(0xffff); \
11873 } while (0)
11874
11875#ifndef IEM_WITH_SETJMP
11876# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11877 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11878# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11879 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU128AlignedSse(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value)))
11880#else
11881# define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Value) \
11882 iemMemStoreDataU128Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11883# define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Value) \
11884 iemMemStoreDataU128AlignedSseJmp(pVCpu, (a_iSeg), (a_GCPtrMem), (a_u128Value))
11885#endif
11886
11887#ifndef IEM_WITH_SETJMP
11888# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11889 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11890# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11891 IEM_MC_RETURN_ON_FAILURE(iemMemStoreDataU256AlignedAvx(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value)))
11892#else
11893# define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Value) \
11894 iemMemStoreDataU256Jmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11895# define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Value) \
11896 iemMemStoreDataU256AlignedAvxJmp(pVCpu, (a_iSeg), (a_GCPtrMem), &(a_u256Value))
11897#endif
11898
11899
11900#define IEM_MC_PUSH_U16(a_u16Value) \
11901 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU16(pVCpu, (a_u16Value)))
11902#define IEM_MC_PUSH_U32(a_u32Value) \
11903 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32(pVCpu, (a_u32Value)))
11904#define IEM_MC_PUSH_U32_SREG(a_u32Value) \
11905 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU32SReg(pVCpu, (a_u32Value)))
11906#define IEM_MC_PUSH_U64(a_u64Value) \
11907 IEM_MC_RETURN_ON_FAILURE(iemMemStackPushU64(pVCpu, (a_u64Value)))
11908
11909#define IEM_MC_POP_U16(a_pu16Value) \
11910 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU16(pVCpu, (a_pu16Value)))
11911#define IEM_MC_POP_U32(a_pu32Value) \
11912 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU32(pVCpu, (a_pu32Value)))
11913#define IEM_MC_POP_U64(a_pu64Value) \
11914 IEM_MC_RETURN_ON_FAILURE(iemMemStackPopU64(pVCpu, (a_pu64Value)))
11915
11916/** Maps guest memory for direct or bounce buffered access.
11917 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11918 * @remarks May return.
11919 */
11920#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) \
11921 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pMem), sizeof(*(a_pMem)), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11922
11923/** Maps guest memory for direct or bounce buffered access.
11924 * The purpose is to pass it to an operand implementation, thus the a_iArg.
11925 * @remarks May return.
11926 */
11927#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) \
11928 IEM_MC_RETURN_ON_FAILURE(iemMemMap(pVCpu, (void **)&(a_pvMem), (a_cbMem), (a_iSeg), (a_GCPtrMem), (a_fAccess)))
11929
11930/** Commits the memory and unmaps the guest memory.
11931 * @remarks May return.
11932 */
11933#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) \
11934 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess)))
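/* Illustrative sketch (not lifted from the opcode decoders): a read-modify-write
 * memory operand is mapped, modified in place and then committed.  The
 * IEM_MC_BEGIN/IEM_MC_END/IEM_MC_LOCAL wrappers and the IEM_ACCESS_DATA_RW flag
 * are assumed to be the ones defined elsewhere in the IEM sources; bRm is the
 * ModR/M byte already fetched by the decoder.
 *
 *      IEM_MC_BEGIN(0, 2);
 *      IEM_MC_LOCAL(RTGCPTR,    GCPtrEff);
 *      IEM_MC_LOCAL(uint32_t *, pu32Dst);
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
 *      IEM_MC_MEM_MAP(pu32Dst, IEM_ACCESS_DATA_RW, pVCpu->iem.s.iEffSeg, GCPtrEff, 0);
 *      *pu32Dst += 1;                              // operate on the mapped or bounce buffered memory
 *      IEM_MC_MEM_COMMIT_AND_UNMAP(pu32Dst, IEM_ACCESS_DATA_RW);
 *      IEM_MC_END();
 */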
11935
11936/** Commits the memory and unmaps the guest memory, unless the FPU status word
11937 * (@a a_u16FSW) and the FPU control word indicate a pending exception that
11938 * would cause FLD not to store its result.
11939 *
11940 * The current understanding is that \#O, \#U, \#IA and \#IS will prevent a
11941 * store, while \#P will not.
11942 *
11943 * @remarks May in theory return - for now.
11944 */
11945#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) \
11946 do { \
11947 if ( !(a_u16FSW & X86_FSW_ES) \
11948 || !( (a_u16FSW & (X86_FSW_UE | X86_FSW_OE | X86_FSW_IE)) \
11949 & ~(pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_MASK_ALL) ) ) \
11950 IEM_MC_RETURN_ON_FAILURE(iemMemCommitAndUnmap(pVCpu, (a_pvMem), (a_fAccess))); \
11951 } while (0)
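/* Concrete illustration of the test above: after an FLD of a signalling NaN
 * with FCW.IM clear (invalid operation unmasked), FSW has IE and ES set and the
 * unmasked-exception term is non-zero, so the commit is skipped and the mapped
 * memory is left untouched.  With only PE set (masked precision), X86_FSW_ES is
 * clear and the store is committed as usual. */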
11952
11953/** Calculate efficient address from R/M. */
11954#ifndef IEM_WITH_SETJMP
11955# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11956 IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddr(pVCpu, (bRm), (cbImm), &(a_GCPtrEff)))
11957#else
11958# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) \
11959 ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmp(pVCpu, (bRm), (cbImm)))
11960#endif
11961
11962#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) (a_pfn)()
11963#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) (a_pfn)((a0))
11964#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) (a_pfn)((a0), (a1))
11965#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) (a_pfn)((a0), (a1), (a2))
11966#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) (a_pfn)((a0), (a1), (a2), (a3))
11967#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) (a_rc) = (a_pfn)((a0), (a1), (a2))
11968#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) (a_rc) = (a_pfn)((a0), (a1), (a2), (a3))
11969
11970/**
11971 * Defers the rest of the instruction emulation to a C implementation routine
11972 * and returns, only taking the standard parameters.
11973 *
11974 * @param a_pfnCImpl The pointer to the C routine.
11975 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
11976 */
11977#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
11978
11979/**
11980 * Defers the rest of instruction emulation to a C implementation routine and
11981 * returns, taking one argument in addition to the standard ones.
11982 *
11983 * @param a_pfnCImpl The pointer to the C routine.
11984 * @param a0 The argument.
11985 */
11986#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
11987
11988/**
11989 * Defers the rest of the instruction emulation to a C implementation routine
11990 * and returns, taking two arguments in addition to the standard ones.
11991 *
11992 * @param a_pfnCImpl The pointer to the C routine.
11993 * @param a0 The first extra argument.
11994 * @param a1 The second extra argument.
11995 */
11996#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
11997
11998/**
11999 * Defers the rest of the instruction emulation to a C implementation routine
12000 * and returns, taking three arguments in addition to the standard ones.
12001 *
12002 * @param a_pfnCImpl The pointer to the C routine.
12003 * @param a0 The first extra argument.
12004 * @param a1 The second extra argument.
12005 * @param a2 The third extra argument.
12006 */
12007#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12008
12009/**
12010 * Defers the rest of the instruction emulation to a C implementation routine
12011 * and returns, taking four arguments in addition to the standard ones.
12012 *
12013 * @param a_pfnCImpl The pointer to the C routine.
12014 * @param a0 The first extra argument.
12015 * @param a1 The second extra argument.
12016 * @param a2 The third extra argument.
12017 * @param a3 The fourth extra argument.
12018 */
12019#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3)
12020
12021/**
12022 * Defers the rest of the instruction emulation to a C implementation routine
12023 * and returns, taking five arguments in addition to the standard ones.
12024 *
12025 * @param a_pfnCImpl The pointer to the C routine.
12026 * @param a0 The first extra argument.
12027 * @param a1 The second extra argument.
12028 * @param a2 The third extra argument.
12029 * @param a3 The fourth extra argument.
12030 * @param a4 The fifth extra argument.
12031 */
12032#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) return (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2, a3, a4)
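/* Illustrative sketch (hypothetical instruction body, not from the decoder
 * tables): the IEM_MC_CALL_CIMPL_* macros return from the opcode function, so
 * they are the last statement inside an IEM_MC_BEGIN/IEM_MC_END block, whereas
 * the IEM_MC_DEFER_TO_CIMPL_* variants below are used without that wrapper.
 * iemCImpl_something is a made-up name for a routine declared via the
 * IEM_CIMPL_DEF_* macros; u16Imm is assumed to have been decoded already.
 *
 *      IEM_MC_BEGIN(0, 0);
 *      IEM_MC_CALL_CIMPL_1(iemCImpl_something, u16Imm);   // returns the strict status code
 *      IEM_MC_END();
 */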
12033
12034/**
12035 * Defers the entire instruction emulation to a C implementation routine and
12036 * returns, only taking the standard parameters.
12037 *
12038 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12039 *
12040 * @param a_pfnCImpl The pointer to the C routine.
12041 * @sa IEM_DECL_IMPL_C_TYPE_0 and IEM_CIMPL_DEF_0.
12042 */
12043#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu))
12044
12045/**
12046 * Defers the entire instruction emulation to a C implementation routine and
12047 * returns, taking one argument in addition to the standard ones.
12048 *
12049 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12050 *
12051 * @param a_pfnCImpl The pointer to the C routine.
12052 * @param a0 The argument.
12053 */
12054#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0)
12055
12056/**
12057 * Defers the entire instruction emulation to a C implementation routine and
12058 * returns, taking two arguments in addition to the standard ones.
12059 *
12060 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12061 *
12062 * @param a_pfnCImpl The pointer to the C routine.
12063 * @param a0 The first extra argument.
12064 * @param a1 The second extra argument.
12065 */
12066#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1)
12067
12068/**
12069 * Defers the entire instruction emulation to a C implementation routine and
12070 * returns, taking three arguments in addition to the standard ones.
12071 *
12072 * This shall be used without any IEM_MC_BEGIN or IEM_MC_END macro surrounding it.
12073 *
12074 * @param a_pfnCImpl The pointer to the C routine.
12075 * @param a0 The first extra argument.
12076 * @param a1 The second extra argument.
12077 * @param a2 The third extra argument.
12078 */
12079#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (a_pfnCImpl)(pVCpu, IEM_GET_INSTR_LEN(pVCpu), a0, a1, a2)
12080
12081/**
12082 * Calls a FPU assembly implementation taking one visible argument.
12083 *
12084 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12085 * @param a0 The first extra argument.
12086 */
12087#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
12088 do { \
12089 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0)); \
12090 } while (0)
12091
12092/**
12093 * Calls a FPU assembly implementation taking two visible arguments.
12094 *
12095 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12096 * @param a0 The first extra argument.
12097 * @param a1 The second extra argument.
12098 */
12099#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
12100 do { \
12101 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12102 } while (0)
12103
12104/**
12105 * Calls a FPU assembly implementation taking three visible arguments.
12106 *
12107 * @param a_pfnAImpl Pointer to the assembly FPU routine.
12108 * @param a0 The first extra argument.
12109 * @param a1 The second extra argument.
12110 * @param a2 The third extra argument.
12111 */
12112#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12113 do { \
12114 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12115 } while (0)
12116
12117#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) \
12118 do { \
12119 (a_FpuData).FSW = (a_FSW); \
12120 (a_FpuData).r80Result = *(a_pr80Value); \
12121 } while (0)
12122
12123/** Pushes FPU result onto the stack. */
12124#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) \
12125 iemFpuPushResult(pVCpu, &a_FpuData)
12126/** Pushes FPU result onto the stack and sets the FPUDP. */
12127#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) \
12128 iemFpuPushResultWithMemOp(pVCpu, &a_FpuData, a_iEffSeg, a_GCPtrEff)
12129
12130/** Replaces ST0 with the first value and pushes the second value onto the FPU stack. */
12131#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) \
12132 iemFpuPushResultTwo(pVCpu, &a_FpuDataTwo)
12133
12134/** Stores FPU result in a stack register. */
12135#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) \
12136 iemFpuStoreResult(pVCpu, &a_FpuData, a_iStReg)
12137/** Stores FPU result in a stack register and pops the stack. */
12138#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) \
12139 iemFpuStoreResultThenPop(pVCpu, &a_FpuData, a_iStReg)
12140/** Stores FPU result in a stack register and sets the FPUDP. */
12141#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12142 iemFpuStoreResultWithMemOp(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12143/** Stores FPU result in a stack register, sets the FPUDP, and pops the
12144 * stack. */
12145#define IEM_MC_STORE_FPU_RESULT_WITH_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) \
12146 iemFpuStoreResultWithMemOpThenPop(pVCpu, &a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff)
12147
12148/** Only update the FOP, FPUIP, and FPUCS. (For FNOP.) */
12149#define IEM_MC_UPDATE_FPU_OPCODE_IP() \
12150 iemFpuUpdateOpcodeAndIp(pVCpu)
12151/** Free a stack register (for FFREE and FFREEP). */
12152#define IEM_MC_FPU_STACK_FREE(a_iStReg) \
12153 iemFpuStackFree(pVCpu, a_iStReg)
12154/** Increment the FPU stack pointer. */
12155#define IEM_MC_FPU_STACK_INC_TOP() \
12156 iemFpuStackIncTop(pVCpu)
12157/** Decrement the FPU stack pointer. */
12158#define IEM_MC_FPU_STACK_DEC_TOP() \
12159 iemFpuStackDecTop(pVCpu)
12160
12161/** Updates the FSW, FOP, FPUIP, and FPUCS. */
12162#define IEM_MC_UPDATE_FSW(a_u16FSW) \
12163 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12164/** Updates the FSW with a constant value as well as FOP, FPUIP, and FPUCS. */
12165#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) \
12166 iemFpuUpdateFSW(pVCpu, a_u16FSW)
12167/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP, and FPUDS. */
12168#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12169 iemFpuUpdateFSWWithMemOp(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12170/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack. */
12171#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) \
12172 iemFpuUpdateFSWThenPop(pVCpu, a_u16FSW)
12173/** Updates the FSW, FOP, FPUIP, FPUCS, FPUDP and FPUDS, and then pops the
12174 * stack. */
12175#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) \
12176 iemFpuUpdateFSWWithMemOpThenPop(pVCpu, a_u16FSW, a_iEffSeg, a_GCPtrEff)
12177/** Updates the FSW, FOP, FPUIP, and FPUCS, and then pops the stack twice. */
12178#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) \
12179 iemFpuUpdateFSWThenPopPop(pVCpu, a_u16FSW)
12180
12181/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. */
12182#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStDst) \
12183 iemFpuStackUnderflow(pVCpu, a_iStDst)
12184/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12185 * stack. */
12186#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStDst) \
12187 iemFpuStackUnderflowThenPop(pVCpu, a_iStDst)
12188/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12189 * FPUDS. */
12190#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12191 iemFpuStackUnderflowWithMemOp(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12192/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS, FOP, FPUDP and
12193 * FPUDS. Pops stack. */
12194#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStDst, a_iEffSeg, a_GCPtrEff) \
12195 iemFpuStackUnderflowWithMemOpThenPop(pVCpu, a_iStDst, a_iEffSeg, a_GCPtrEff)
12196/** Raises a FPU stack underflow exception. Sets FPUIP, FPUCS and FOP. Pops
12197 * stack twice. */
12198#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() \
12199 iemFpuStackUnderflowThenPopPop(pVCpu)
12200/** Raises a FPU stack underflow exception for an instruction pushing a result
12201 * value onto the stack. Sets FPUIP, FPUCS and FOP. */
12202#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() \
12203 iemFpuStackPushUnderflow(pVCpu)
12204/** Raises a FPU stack underflow exception for an instruction pushing a result
12205 * value onto the stack and replacing ST0. Sets FPUIP, FPUCS and FOP. */
12206#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() \
12207 iemFpuStackPushUnderflowTwo(pVCpu)
12208
12209/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12210 * FPUIP, FPUCS and FOP. */
12211#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() \
12212 iemFpuStackPushOverflow(pVCpu)
12213/** Raises a FPU stack overflow exception as part of a push attempt. Sets
12214 * FPUIP, FPUCS, FOP, FPUDP and FPUDS. */
12215#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) \
12216 iemFpuStackPushOverflowWithMemOp(pVCpu, a_iEffSeg, a_GCPtrEff)
12217/** Prepares for using the FPU state.
12218 * Ensures that we can use the host FPU in the current context (RC+R0).
12219 * Ensures the guest FPU state in the CPUMCTX is up to date. */
12220#define IEM_MC_PREPARE_FPU_USAGE() iemFpuPrepareUsage(pVCpu)
12221/** Actualizes the guest FPU state so it can be accessed read-only fashion. */
12222#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() iemFpuActualizeStateForRead(pVCpu)
12223/** Actualizes the guest FPU state so it can be accessed and modified. */
12224#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() iemFpuActualizeStateForChange(pVCpu)
12225
12226/** Prepares for using the SSE state.
12227 * Ensures that we can use the host SSE/FPU in the current context (RC+R0).
12228 * Ensures the guest SSE state in the CPUMCTX is up to date. */
12229#define IEM_MC_PREPARE_SSE_USAGE() iemFpuPrepareUsageSse(pVCpu)
12230/** Actualizes the guest XMM0..15 and MXCSR register state for read-only access. */
12231#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() iemFpuActualizeSseStateForRead(pVCpu)
12232/** Actualizes the guest XMM0..15 and MXCSR register state for read-write access. */
12233#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() iemFpuActualizeSseStateForChange(pVCpu)
12234
12235/** Prepares for using the AVX state.
12236 * Ensures that we can use the host AVX/FPU in the current context (RC+R0).
12237 * Ensures the guest AVX state in the CPUMCTX is up to date.
12238 * @note This will include the AVX512 state too when support for it is added
12239 * due to the zero-extending feature of VEX instructions. */
12240#define IEM_MC_PREPARE_AVX_USAGE() iemFpuPrepareUsageAvx(pVCpu)
12241/** Actualizes the guest YMM0..15 and MXCSR register state for read-only access. */
12242#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() iemFpuActualizeAvxStateForRead(pVCpu)
12243/** Actualizes the guest YMM0..15 and MXCSR register state for read-write access. */
12244#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() iemFpuActualizeAvxStateForChange(pVCpu)
12245
12246/**
12247 * Calls a MMX assembly implementation taking two visible arguments.
12248 *
12249 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12250 * @param a0 The first extra argument.
12251 * @param a1 The second extra argument.
12252 */
12253#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
12254 do { \
12255 IEM_MC_PREPARE_FPU_USAGE(); \
12256 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12257 } while (0)
12258
12259/**
12260 * Calls a MMX assembly implementation taking three visible arguments.
12261 *
12262 * @param a_pfnAImpl Pointer to the assembly MMX routine.
12263 * @param a0 The first extra argument.
12264 * @param a1 The second extra argument.
12265 * @param a2 The third extra argument.
12266 */
12267#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12268 do { \
12269 IEM_MC_PREPARE_FPU_USAGE(); \
12270 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12271 } while (0)
12272
12273
12274/**
12275 * Calls a SSE assembly implementation taking two visible arguments.
12276 *
12277 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12278 * @param a0 The first extra argument.
12279 * @param a1 The second extra argument.
12280 */
12281#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
12282 do { \
12283 IEM_MC_PREPARE_SSE_USAGE(); \
12284 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1)); \
12285 } while (0)
12286
12287/**
12288 * Calls a SSE assembly implementation taking three visible arguments.
12289 *
12290 * @param a_pfnAImpl Pointer to the assembly SSE routine.
12291 * @param a0 The first extra argument.
12292 * @param a1 The second extra argument.
12293 * @param a2 The third extra argument.
12294 */
12295#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
12296 do { \
12297 IEM_MC_PREPARE_SSE_USAGE(); \
12298 a_pfnAImpl(&pVCpu->cpum.GstCtx.XState.x87, (a0), (a1), (a2)); \
12299 } while (0)
12300
12301
12302/** Declares implicit arguments for IEM_MC_CALL_AVX_AIMPL_2,
12303 * IEM_MC_CALL_AVX_AIMPL_3, IEM_MC_CALL_AVX_AIMPL_4, ... */
12304#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() \
12305 IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, &pVCpu->cpum.GstCtx.XState, 0)
12306
12307/**
12308 * Calls a AVX assembly implementation taking two visible arguments.
12309 *
12310 * There is one implicit zero'th argument, a pointer to the extended state.
12311 *
12312 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12313 * @param a1 The first extra argument.
12314 * @param a2 The second extra argument.
12315 */
12316#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
12317 do { \
12318 IEM_MC_PREPARE_AVX_USAGE(); \
12319 a_pfnAImpl(pXState, (a1), (a2)); \
12320 } while (0)
12321
12322/**
12323 * Calls a AVX assembly implementation taking three visible arguments.
12324 *
12325 * There is one implicit zero'th argument, a pointer to the extended state.
12326 *
12327 * @param a_pfnAImpl Pointer to the assembly AVX routine.
12328 * @param a1 The first extra argument.
12329 * @param a2 The second extra argument.
12330 * @param a3 The third extra argument.
12331 */
12332#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
12333 do { \
12334 IEM_MC_PREPARE_AVX_USAGE(); \
12335 a_pfnAImpl(pXState, (a1), (a2), (a3)); \
12336 } while (0)
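/* Illustrative sketch: IEM_MC_IMPLICIT_AVX_AIMPL_ARGS declares the implicit
 * pXState argument that IEM_MC_CALL_AVX_AIMPL_2/3 pass as the zero'th
 * parameter, so the visible arguments start at index 1.  pfnAImpl and the
 * IEM_MC_ARG wrapper are assumed from elsewhere in the IEM sources.
 *
 *      IEM_MC_IMPLICIT_AVX_AIMPL_ARGS();
 *      IEM_MC_ARG(PRTUINT128U,  puDst, 1);
 *      IEM_MC_ARG(PCRTUINT128U, puSrc, 2);
 *      IEM_MC_CALL_AVX_AIMPL_2(pfnAImpl, puDst, puSrc);   // prepares AVX usage, then calls pfnAImpl(pXState, puDst, puSrc)
 */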
12337
12338/** @note Not for IOPL or IF testing. */
12339#define IEM_MC_IF_EFL_BIT_SET(a_fBit) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) {
12340/** @note Not for IOPL or IF testing. */
12341#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit))) {
12342/** @note Not for IOPL or IF testing. */
12343#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) if (pVCpu->cpum.GstCtx.eflags.u & (a_fBits)) {
12344/** @note Not for IOPL or IF testing. */
12345#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) if (!(pVCpu->cpum.GstCtx.eflags.u & (a_fBits))) {
12346/** @note Not for IOPL or IF testing. */
12347#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) \
12348 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12349 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12350/** @note Not for IOPL or IF testing. */
12351#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) \
12352 if ( !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12353 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12354/** @note Not for IOPL or IF testing. */
12355#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) \
12356 if ( (pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12357 || !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12358 != !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12359/** @note Not for IOPL or IF testing. */
12360#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) \
12361 if ( !(pVCpu->cpum.GstCtx.eflags.u & (a_fBit)) \
12362 && !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit1)) \
12363 == !!(pVCpu->cpum.GstCtx.eflags.u & (a_fBit2)) ) {
12364#define IEM_MC_IF_CX_IS_NZ() if (pVCpu->cpum.GstCtx.cx != 0) {
12365#define IEM_MC_IF_ECX_IS_NZ() if (pVCpu->cpum.GstCtx.ecx != 0) {
12366#define IEM_MC_IF_RCX_IS_NZ() if (pVCpu->cpum.GstCtx.rcx != 0) {
12367/** @note Not for IOPL or IF testing. */
12368#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12369 if ( pVCpu->cpum.GstCtx.cx != 0 \
12370 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12371/** @note Not for IOPL or IF testing. */
12372#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12373 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12374 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12375/** @note Not for IOPL or IF testing. */
12376#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) \
12377 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12378 && (pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12379/** @note Not for IOPL or IF testing. */
12380#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12381 if ( pVCpu->cpum.GstCtx.cx != 0 \
12382 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12383/** @note Not for IOPL or IF testing. */
12384#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12385 if ( pVCpu->cpum.GstCtx.ecx != 0 \
12386 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12387/** @note Not for IOPL or IF testing. */
12388#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) \
12389 if ( pVCpu->cpum.GstCtx.rcx != 0 \
12390 && !(pVCpu->cpum.GstCtx.eflags.u & a_fBit)) {
12391#define IEM_MC_IF_LOCAL_IS_Z(a_Local) if ((a_Local) == 0) {
12392#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) if (iemGRegFetchU64(pVCpu, (a_iGReg)) & RT_BIT_64(a_iBitNo)) {
12393
12394#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) \
12395 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) == VINF_SUCCESS) {
12396#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) \
12397 if (iemFpuStRegNotEmpty(pVCpu, (a_iSt)) != VINF_SUCCESS) {
12398#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) \
12399 if (iemFpuStRegNotEmptyRef(pVCpu, (a_iSt), &(a_pr80Dst)) == VINF_SUCCESS) {
12400#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(a_pr80Dst0, a_iSt0, a_pr80Dst1, a_iSt1) \
12401 if (iemFpu2StRegsNotEmptyRef(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1), &(a_pr80Dst1)) == VINF_SUCCESS) {
12402#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(a_pr80Dst0, a_iSt0, a_iSt1) \
12403 if (iemFpu2StRegsNotEmptyRefFirst(pVCpu, (a_iSt0), &(a_pr80Dst0), (a_iSt1)) == VINF_SUCCESS) {
12404#define IEM_MC_IF_FCW_IM() \
12405 if (pVCpu->cpum.GstCtx.XState.x87.FCW & X86_FCW_IM) {
12406
12407#define IEM_MC_ELSE() } else {
12408#define IEM_MC_ENDIF() } do {} while (0)
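/* Illustrative sketch of how the conditional microcode macros combine with the
 * FPU helpers above (hypothetical two-operand FPU instruction; pfnAImpl, the
 * IEM_MC_LOCAL wrapper and the IEMFPURESULT/PCRTFLOAT80U types are assumed from
 * elsewhere in the IEM and IPRT sources):
 *
 *      IEM_MC_LOCAL(IEMFPURESULT, FpuRes);
 *      IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value1);
 *      IEM_MC_LOCAL(PCRTFLOAT80U, pr80Value2);
 *      IEM_MC_PREPARE_FPU_USAGE();
 *      IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(pr80Value1, 0, pr80Value2, 1)
 *          IEM_MC_CALL_FPU_AIMPL_3(pfnAImpl, &FpuRes, pr80Value1, pr80Value2);
 *          IEM_MC_STORE_FPU_RESULT_THEN_POP(FpuRes, 0);
 *      IEM_MC_ELSE()
 *          IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(0);
 *      IEM_MC_ENDIF();
 */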
12409
12410/** @} */
12411
12412
12413/** @name Opcode Debug Helpers.
12414 * @{
12415 */
12416#ifdef VBOX_WITH_STATISTICS
12417# ifdef IN_RING3
12418# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsR3.a_Stats += 1; } while (0)
12419# else
12420# define IEMOP_INC_STATS(a_Stats) do { pVCpu->iem.s.StatsRZ.a_Stats += 1; } while (0)
12421# endif
12422#else
12423# define IEMOP_INC_STATS(a_Stats) do { } while (0)
12424#endif
12425
12426#ifdef DEBUG
12427# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) \
12428 do { \
12429 IEMOP_INC_STATS(a_Stats); \
12430 Log4(("decode - %04x:%RGv %s%s [#%u]\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, \
12431 pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK ? "lock " : "", a_szMnemonic, pVCpu->iem.s.cInstructions)); \
12432 } while (0)
12433
12434# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12435 do { \
12436 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12437 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12438 (void)RT_CONCAT(OP_,a_Upper); \
12439 (void)(a_fDisHints); \
12440 (void)(a_fIemHints); \
12441 } while (0)
12442
12443# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12444 do { \
12445 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12446 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12447 (void)RT_CONCAT(OP_,a_Upper); \
12448 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12449 (void)(a_fDisHints); \
12450 (void)(a_fIemHints); \
12451 } while (0)
12452
12453# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12454 do { \
12455 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12456 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12457 (void)RT_CONCAT(OP_,a_Upper); \
12458 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12459 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12460 (void)(a_fDisHints); \
12461 (void)(a_fIemHints); \
12462 } while (0)
12463
12464# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12465 do { \
12466 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12467 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12468 (void)RT_CONCAT(OP_,a_Upper); \
12469 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12470 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12471 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12472 (void)(a_fDisHints); \
12473 (void)(a_fIemHints); \
12474 } while (0)
12475
12476# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12477 do { \
12478 IEMOP_MNEMONIC(a_Stats, a_szMnemonic); \
12479 (void)RT_CONCAT(IEMOPFORM_, a_Form); \
12480 (void)RT_CONCAT(OP_,a_Upper); \
12481 (void)RT_CONCAT(OP_PARM_,a_Op1); \
12482 (void)RT_CONCAT(OP_PARM_,a_Op2); \
12483 (void)RT_CONCAT(OP_PARM_,a_Op3); \
12484 (void)RT_CONCAT(OP_PARM_,a_Op4); \
12485 (void)(a_fDisHints); \
12486 (void)(a_fIemHints); \
12487 } while (0)
12488
12489#else
12490# define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) IEMOP_INC_STATS(a_Stats)
12491
12492# define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12493 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12494# define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12495 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12496# define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12497 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12498# define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12499 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12500# define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12501 IEMOP_MNEMONIC(a_Stats, a_szMnemonic)
12502
12503#endif
12504
12505#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) \
12506 IEMOP_MNEMONIC0EX(a_Lower, \
12507 #a_Lower, \
12508 a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints)
12509#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) \
12510 IEMOP_MNEMONIC1EX(RT_CONCAT3(a_Lower,_,a_Op1), \
12511 #a_Lower " " #a_Op1, \
12512 a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints)
12513#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) \
12514 IEMOP_MNEMONIC2EX(RT_CONCAT5(a_Lower,_,a_Op1,_,a_Op2), \
12515 #a_Lower " " #a_Op1 "," #a_Op2, \
12516 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints)
12517#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) \
12518 IEMOP_MNEMONIC3EX(RT_CONCAT7(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3), \
12519 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3, \
12520 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints)
12521#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) \
12522 IEMOP_MNEMONIC4EX(RT_CONCAT9(a_Lower,_,a_Op1,_,a_Op2,_,a_Op3,_,a_Op4), \
12523 #a_Lower " " #a_Op1 "," #a_Op2 "," #a_Op3 "," #a_Op4, \
12524 a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints)
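/* Illustrative example: an opcode function typically opens with one of these,
 * e.g. IEMOP_MNEMONIC2(MR, ADD, add, Eb, Gb, DISOPTYPE_HARMLESS, 0), which
 * bumps the per-instruction statistics counter, emits the Log4 decode line in
 * debug builds, and compile-time references the IEMOPFORM_/OP_/OP_PARM_
 * constants named by the arguments.  The exact form and hint values shown here
 * are only an illustration. */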
12525
12526/** @} */
12527
12528
12529/** @name Opcode Helpers.
12530 * @{
12531 */
12532
12533#ifdef IN_RING3
12534# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12535 do { \
12536 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12537 else \
12538 { \
12539 (void)DBGFSTOP(pVCpu->CTX_SUFF(pVM)); \
12540 return IEMOP_RAISE_INVALID_OPCODE(); \
12541 } \
12542 } while (0)
12543#else
12544# define IEMOP_HLP_MIN_CPU(a_uMinCpu, a_fOnlyIf) \
12545 do { \
12546 if (IEM_GET_TARGET_CPU(pVCpu) >= (a_uMinCpu) || !(a_fOnlyIf)) { } \
12547 else return IEMOP_RAISE_INVALID_OPCODE(); \
12548 } while (0)
12549#endif
12550
12551/** The instruction requires a 186 or later. */
12552#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_186
12553# define IEMOP_HLP_MIN_186() do { } while (0)
12554#else
12555# define IEMOP_HLP_MIN_186() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_186, true)
12556#endif
12557
12558/** The instruction requires a 286 or later. */
12559#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_286
12560# define IEMOP_HLP_MIN_286() do { } while (0)
12561#else
12562# define IEMOP_HLP_MIN_286() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_286, true)
12563#endif
12564
12565/** The instruction requires a 386 or later. */
12566#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12567# define IEMOP_HLP_MIN_386() do { } while (0)
12568#else
12569# define IEMOP_HLP_MIN_386() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, true)
12570#endif
12571
12572/** The instruction requires a 386 or later if the given expression is true. */
12573#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_386
12574# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) do { } while (0)
12575#else
12576# define IEMOP_HLP_MIN_386_EX(a_fOnlyIf) IEMOP_HLP_MIN_CPU(IEMTARGETCPU_386, a_fOnlyIf)
12577#endif
12578
12579/** The instruction requires a 486 or later. */
12580#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_486
12581# define IEMOP_HLP_MIN_486() do { } while (0)
12582#else
12583# define IEMOP_HLP_MIN_486() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_486, true)
12584#endif
12585
12586/** The instruction requires a Pentium (586) or later. */
12587#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PENTIUM
12588# define IEMOP_HLP_MIN_586() do { } while (0)
12589#else
12590# define IEMOP_HLP_MIN_586() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PENTIUM, true)
12591#endif
12592
12593/** The instruction requires a PentiumPro (686) or later. */
12594#if IEM_CFG_TARGET_CPU >= IEMTARGETCPU_PPRO
12595# define IEMOP_HLP_MIN_686() do { } while (0)
12596#else
12597# define IEMOP_HLP_MIN_686() IEMOP_HLP_MIN_CPU(IEMTARGETCPU_PPRO, true)
12598#endif
12599
12600
12601/** The instruction raises an \#UD in real and V8086 mode. */
12602#define IEMOP_HLP_NO_REAL_OR_V86_MODE() \
12603 do \
12604 { \
12605 if (!IEM_IS_REAL_OR_V86_MODE(pVCpu)) { /* likely */ } \
12606 else return IEMOP_RAISE_INVALID_OPCODE(); \
12607 } while (0)
12608
12609#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
12610/** This instruction raises an \#UD in real and V8086 mode, or in long mode when
12611 * not using a 64-bit code segment (applicable to all VMX instructions
12612 * except VMCALL).
12613 */
12614#define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) \
12615 do \
12616 { \
12617 if ( !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12618 && ( !IEM_IS_LONG_MODE(pVCpu) \
12619 || IEM_IS_64BIT_CODE(pVCpu))) \
12620 { /* likely */ } \
12621 else \
12622 { \
12623 if (IEM_IS_REAL_OR_V86_MODE(pVCpu)) \
12624 { \
12625 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_RealOrV86Mode; \
12626 Log5((a_szInstr ": Real or v8086 mode -> #UD\n")); \
12627 return IEMOP_RAISE_INVALID_OPCODE(); \
12628 } \
12629 if (IEM_IS_LONG_MODE(pVCpu) && !IEM_IS_64BIT_CODE(pVCpu)) \
12630 { \
12631 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_LongModeCS; \
12632 Log5((a_szInstr ": Long mode without 64-bit code segment -> #UD\n")); \
12633 return IEMOP_RAISE_INVALID_OPCODE(); \
12634 } \
12635 } \
12636 } while (0)
12637
12638/** The instruction can only be executed in VMX operation (VMX root mode and
12639 * non-root mode).
12640 *
12641 * @note Update IEM_VMX_IN_VMX_OPERATION if changes are made here.
12642 */
12643# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) \
12644 do \
12645 { \
12646 if (IEM_VMX_IS_ROOT_MODE(pVCpu)) { /* likely */ } \
12647 else \
12648 { \
12649 pVCpu->cpum.GstCtx.hwvirt.vmx.enmDiag = a_InsDiagPrefix##_VmxRoot; \
12650 Log5((a_szInstr ": Not in VMX operation (root mode) -> #UD\n")); \
12651 return IEMOP_RAISE_INVALID_OPCODE(); \
12652 } \
12653 } while (0)
12654#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
12655
12656/** The instruction is not available in 64-bit mode, throw \#UD if we're in
12657 * 64-bit mode. */
12658#define IEMOP_HLP_NO_64BIT() \
12659 do \
12660 { \
12661 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12662 return IEMOP_RAISE_INVALID_OPCODE(); \
12663 } while (0)
12664
12665/** The instruction is only available in 64-bit mode, throw \#UD if we're not in
12666 * 64-bit mode. */
12667#define IEMOP_HLP_ONLY_64BIT() \
12668 do \
12669 { \
12670 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT) \
12671 return IEMOP_RAISE_INVALID_OPCODE(); \
12672 } while (0)
12673
12674/** The instruction defaults to 64-bit operand size if 64-bit mode. */
12675#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() \
12676 do \
12677 { \
12678 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12679 iemRecalEffOpSize64Default(pVCpu); \
12680 } while (0)
12681
12682/** The instruction has 64-bit operand size if 64-bit mode. */
12683#define IEMOP_HLP_64BIT_OP_SIZE() \
12684 do \
12685 { \
12686 if (pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT) \
12687 pVCpu->iem.s.enmEffOpSize = pVCpu->iem.s.enmDefOpSize = IEMMODE_64BIT; \
12688 } while (0)
12689
12690/** Only a REX prefix immediately preceding the first opcode byte takes
12691 * effect. This macro helps ensure this, as well as logging bad guest code. */
12692#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) \
12693 do \
12694 { \
12695 if (RT_UNLIKELY(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_REX)) \
12696 { \
12697 Log5((a_szPrf ": Overriding REX prefix at %RX16! fPrefixes=%#x\n", pVCpu->cpum.GstCtx.rip, pVCpu->iem.s.fPrefixes)); \
12698 pVCpu->iem.s.fPrefixes &= ~IEM_OP_PRF_REX_MASK; \
12699 pVCpu->iem.s.uRexB = 0; \
12700 pVCpu->iem.s.uRexIndex = 0; \
12701 pVCpu->iem.s.uRexReg = 0; \
12702 iemRecalEffOpSize(pVCpu); \
12703 } \
12704 } while (0)
12705
12706/**
12707 * Done decoding.
12708 */
12709#define IEMOP_HLP_DONE_DECODING() \
12710 do \
12711 { \
12712 /*nothing for now, maybe later... */ \
12713 } while (0)
12714
12715/**
12716 * Done decoding, raise \#UD exception if lock prefix present.
12717 */
12718#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() \
12719 do \
12720 { \
12721 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12722 { /* likely */ } \
12723 else \
12724 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12725 } while (0)
12726
12727
12728/**
12729 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12730 * repnz or size prefixes are present, or if in real or v8086 mode.
12731 */
12732#define IEMOP_HLP_DONE_VEX_DECODING() \
12733 do \
12734 { \
12735 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12736 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12737 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12738 { /* likely */ } \
12739 else \
12740 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12741 } while (0)
12742
12743/**
12744 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12745 * repnz or size prefixes are present, if in real or v8086 mode, or if VEX.L is not zero.
12746 */
12747#define IEMOP_HLP_DONE_VEX_DECODING_L0() \
12748 do \
12749 { \
12750 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12751 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12752 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) \
12753 && pVCpu->iem.s.uVexLength == 0)) \
12754 { /* likely */ } \
12755 else \
12756 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12757 } while (0)
12758
12759
12760/**
12761 * Done decoding VEX instruction, raise \#UD exception if any lock, rex, repz,
12762 * repnz or size prefixes are present, or if the VEX.VVVV field doesn't indicate
12763 * register 0, or if in real or v8086 mode.
12764 */
12765#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() \
12766 do \
12767 { \
12768 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12769 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REX)) \
12770 && !pVCpu->iem.s.uVex3rdReg \
12771 && !IEM_IS_REAL_OR_V86_MODE(pVCpu) )) \
12772 { /* likely */ } \
12773 else \
12774 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12775 } while (0)
12776
12777/**
12778 * Done decoding VEX, no V, L=0.
12779 * Raises \#UD exception if rex, rep, opsize or lock prefixes are present, if
12780 * we're in real or v8086 mode, if VEX.V!=0xf, or if VEX.L!=0.
12781 */
12782#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() \
12783 do \
12784 { \
12785 if (RT_LIKELY( !( pVCpu->iem.s.fPrefixes \
12786 & (IEM_OP_PRF_LOCK | IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPZ | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REX)) \
12787 && pVCpu->iem.s.uVexLength == 0 \
12788 && pVCpu->iem.s.uVex3rdReg == 0 \
12789 && !IEM_IS_REAL_OR_V86_MODE(pVCpu))) \
12790 { /* likely */ } \
12791 else \
12792 return IEMOP_RAISE_INVALID_OPCODE(); \
12793 } while (0)
12794
12795#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) \
12796 do \
12797 { \
12798 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12799 { /* likely */ } \
12800 else \
12801 { \
12802 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_fDisOpType); \
12803 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12804 } \
12805 } while (0)
12806#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) \
12807 do \
12808 { \
12809 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_LOCK))) \
12810 { /* likely */ } \
12811 else \
12812 { \
12813 NOREF(a_uDisOpNo); NOREF(a_fIemOpFlags); NOREF(a_uDisParam0); NOREF(a_uDisParam1); NOREF(a_fDisOpType); \
12814 return IEMOP_RAISE_INVALID_LOCK_PREFIX(); \
12815 } \
12816 } while (0)
12817
12818/**
12819 * Done decoding, raise \#UD exception if any lock, repz or repnz prefixes
12820 * are present.
12821 */
12822#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() \
12823 do \
12824 { \
12825 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_LOCK | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12826 { /* likely */ } \
12827 else \
12828 return IEMOP_RAISE_INVALID_OPCODE(); \
12829 } while (0)
12830
12831/**
12832 * Done decoding, raise \#UD exception if any operand-size override, repz or repnz
12833 * prefixes are present.
12834 */
12835#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() \
12836 do \
12837 { \
12838 if (RT_LIKELY(!(pVCpu->iem.s.fPrefixes & (IEM_OP_PRF_SIZE_OP | IEM_OP_PRF_REPNZ | IEM_OP_PRF_REPZ)))) \
12839 { /* likely */ } \
12840 else \
12841 return IEMOP_RAISE_INVALID_OPCODE(); \
12842 } while (0)
12843
12844
12845/**
12846 * Calculates the effective address of a ModR/M memory operand.
12847 *
12848 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
12849 *
12850 * @return Strict VBox status code.
12851 * @param pVCpu The cross context virtual CPU structure of the calling thread.
12852 * @param bRm The ModRM byte.
12853 * @param cbImm The size of any immediate following the
12854 * effective address opcode bytes. Important for
12855 * RIP relative addressing.
12856 * @param pGCPtrEff Where to return the effective address.
12857 */
12858IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff)
12859{
12860 Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
12861# define SET_SS_DEF() \
12862 do \
12863 { \
12864 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
12865 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
12866 } while (0)
12867
12868 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
12869 {
12870/** @todo Check the effective address size crap! */
12871 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
12872 {
12873 uint16_t u16EffAddr;
12874
12875 /* Handle the disp16 form with no registers first. */
12876 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
12877 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
12878 else
12879 {
12880                /* Get the displacement. */
12881 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12882 {
12883 case 0: u16EffAddr = 0; break;
12884 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
12885 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
12886 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
12887 }
12888
12889 /* Add the base and index registers to the disp. */
12890 switch (bRm & X86_MODRM_RM_MASK)
12891 {
12892 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
12893 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
12894 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
12895 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
12896 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
12897 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
12898 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
12899 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
12900 }
12901 }
12902
12903 *pGCPtrEff = u16EffAddr;
12904 }
12905 else
12906 {
12907 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
12908 uint32_t u32EffAddr;
12909
12910 /* Handle the disp32 form with no registers first. */
12911 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
12912 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
12913 else
12914 {
12915 /* Get the register (or SIB) value. */
12916 switch ((bRm & X86_MODRM_RM_MASK))
12917 {
12918 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12919 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12920 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12921 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12922 case 4: /* SIB */
12923 {
12924 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
12925
12926 /* Get the index and scale it. */
12927 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
12928 {
12929 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
12930 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
12931 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
12932 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
12933 case 4: u32EffAddr = 0; /*none */ break;
12934 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
12935 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12936 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12937 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12938 }
12939 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
12940
12941 /* add base */
12942 switch (bSib & X86_SIB_BASE_MASK)
12943 {
12944 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
12945 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
12946 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
12947 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
12948 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
12949 case 5:
12950 if ((bRm & X86_MODRM_MOD_MASK) != 0)
12951 {
12952 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
12953 SET_SS_DEF();
12954 }
12955 else
12956 {
12957 uint32_t u32Disp;
12958 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12959 u32EffAddr += u32Disp;
12960 }
12961 break;
12962 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
12963 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
12964 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12965 }
12966 break;
12967 }
12968 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
12969 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
12970 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
12971 IEM_NOT_REACHED_DEFAULT_CASE_RET();
12972 }
12973
12974 /* Get and add the displacement. */
12975 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
12976 {
12977 case 0:
12978 break;
12979 case 1:
12980 {
12981 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
12982 u32EffAddr += i8Disp;
12983 break;
12984 }
12985 case 2:
12986 {
12987 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
12988 u32EffAddr += u32Disp;
12989 break;
12990 }
12991 default:
12992 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
12993 }
12994
12995 }
12996 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
12997 *pGCPtrEff = u32EffAddr;
12998 else
12999 {
13000 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13001 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13002 }
13003 }
13004 }
13005 else
13006 {
13007 uint64_t u64EffAddr;
13008
13009 /* Handle the rip+disp32 form with no registers first. */
13010 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13011 {
13012 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13013 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13014 }
13015 else
13016 {
13017 /* Get the register (or SIB) value. */
13018 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13019 {
13020 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13021 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13022 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13023 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13024 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13025 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13026 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13027 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13028 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13029 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13030 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13031 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13032 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13033 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13034 /* SIB */
13035 case 4:
13036 case 12:
13037 {
13038 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13039
13040 /* Get the index and scale it. */
13041 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13042 {
13043 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13044 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13045 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13046 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13047 case 4: u64EffAddr = 0; /*none */ break;
13048 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13049 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13050 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13051 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13052 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13053 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13054 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13055 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13056 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13057 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13058 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13059 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13060 }
13061 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13062
13063 /* add base */
13064 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13065 {
13066 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13067 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13068 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13069 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13070 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13071 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13072 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13073 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13074 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13075 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13076 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13077 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13078 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13079 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13080 /* complicated encodings */
13081 case 5:
13082 case 13:
13083 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13084 {
13085 if (!pVCpu->iem.s.uRexB)
13086 {
13087 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13088 SET_SS_DEF();
13089 }
13090 else
13091 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13092 }
13093 else
13094 {
13095 uint32_t u32Disp;
13096 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13097 u64EffAddr += (int32_t)u32Disp;
13098 }
13099 break;
13100 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13101 }
13102 break;
13103 }
13104 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13105 }
13106
13107 /* Get and add the displacement. */
13108 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13109 {
13110 case 0:
13111 break;
13112 case 1:
13113 {
13114 int8_t i8Disp;
13115 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13116 u64EffAddr += i8Disp;
13117 break;
13118 }
13119 case 2:
13120 {
13121 uint32_t u32Disp;
13122 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13123 u64EffAddr += (int32_t)u32Disp;
13124 break;
13125 }
13126 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13127 }
13128
13129 }
13130
13131 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13132 *pGCPtrEff = u64EffAddr;
13133 else
13134 {
13135 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13136 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13137 }
13138 }
13139
13140 Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
13141 return VINF_SUCCESS;
13142}
13143
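/*
 * Worked example (illustrative only, not part of the build): how the ModR/M +
 * SIB decomposition implemented above and below turns instruction bytes into
 * an effective address.  The concrete byte values are chosen for illustration.
 *
 * For the 64-bit instruction  mov rax, [rbx + rsi*4 + 0x10]  the bytes are:
 *      48 8B 44 B3 10
 *      |  |  |  |  +--- disp8 = 0x10
 *      |  |  |  +------ SIB   = 0xB3: scale=2 (factor 4), index=rsi, base=rbx
 *      |  |  +--------- ModRM = 0x44: mod=1 (disp8), reg=rax, rm=4 (use SIB)
 *      |  +------------ opcode 8B (mov Gv,Ev)
 *      +--------------- REX.W prefix
 *
 * Following the order of the code, the helper computes:
 *      u64EffAddr   = pVCpu->cpum.GstCtx.rsi;      index
 *      u64EffAddr <<= 2;                           scale
 *      u64EffAddr  += pVCpu->cpum.GstCtx.rbx;      base
 *      u64EffAddr  += (int8_t)0x10;                displacement
 * The result is returned via *pGCPtrEff (or directly by the Jmp variant),
 * truncated to 32 bits when the effective address size is 32-bit.
 */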
13144
13145/**
13146 * Calculates the effective address of a ModR/M memory operand.
13147 *
13148 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13149 *
13150 * @return Strict VBox status code.
13151 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13152 * @param bRm The ModRM byte.
13153 * @param cbImm The size of any immediate following the
13154 * effective address opcode bytes. Important for
13155 * RIP relative addressing.
13156 * @param pGCPtrEff Where to return the effective address.
13157 * @param offRsp RSP displacement.
13158 */
13159IEM_STATIC VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp)
13160{
13161    Log5(("iemOpHlpCalcRmEffAddrEx: bRm=%#x\n", bRm));
13162# define SET_SS_DEF() \
13163 do \
13164 { \
13165 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13166 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13167 } while (0)
13168
13169 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13170 {
13171/** @todo Check the effective address size crap! */
13172 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13173 {
13174 uint16_t u16EffAddr;
13175
13176 /* Handle the disp16 form with no registers first. */
13177 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13178 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13179 else
13180 {
13181                /* Get the displacement. */
13182 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13183 {
13184 case 0: u16EffAddr = 0; break;
13185 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13186 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13187 default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
13188 }
13189
13190 /* Add the base and index registers to the disp. */
13191 switch (bRm & X86_MODRM_RM_MASK)
13192 {
13193 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13194 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13195 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13196 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13197 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13198 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13199 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13200 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13201 }
13202 }
13203
13204 *pGCPtrEff = u16EffAddr;
13205 }
13206 else
13207 {
13208 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13209 uint32_t u32EffAddr;
13210
13211 /* Handle the disp32 form with no registers first. */
13212 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13213 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13214 else
13215 {
13216 /* Get the register (or SIB) value. */
13217 switch ((bRm & X86_MODRM_RM_MASK))
13218 {
13219 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13220 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13221 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13222 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13223 case 4: /* SIB */
13224 {
13225 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13226
13227 /* Get the index and scale it. */
13228 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13229 {
13230 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13231 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13232 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13233 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13234 case 4: u32EffAddr = 0; /*none */ break;
13235 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13236 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13237 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13238 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13239 }
13240 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13241
13242 /* add base */
13243 switch (bSib & X86_SIB_BASE_MASK)
13244 {
13245 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13246 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13247 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13248 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13249 case 4:
13250 u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
13251 SET_SS_DEF();
13252 break;
13253 case 5:
13254 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13255 {
13256 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13257 SET_SS_DEF();
13258 }
13259 else
13260 {
13261 uint32_t u32Disp;
13262 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13263 u32EffAddr += u32Disp;
13264 }
13265 break;
13266 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13267 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13268 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13269 }
13270 break;
13271 }
13272 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13273 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13274 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13275 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13276 }
13277
13278 /* Get and add the displacement. */
13279 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13280 {
13281 case 0:
13282 break;
13283 case 1:
13284 {
13285 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13286 u32EffAddr += i8Disp;
13287 break;
13288 }
13289 case 2:
13290 {
13291 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13292 u32EffAddr += u32Disp;
13293 break;
13294 }
13295 default:
13296 AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
13297 }
13298
13299 }
13300 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13301 *pGCPtrEff = u32EffAddr;
13302 else
13303 {
13304 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13305 *pGCPtrEff = u32EffAddr & UINT16_MAX;
13306 }
13307 }
13308 }
13309 else
13310 {
13311 uint64_t u64EffAddr;
13312
13313 /* Handle the rip+disp32 form with no registers first. */
13314 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13315 {
13316 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13317 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13318 }
13319 else
13320 {
13321 /* Get the register (or SIB) value. */
13322 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13323 {
13324 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13325 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13326 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13327 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13328 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13329 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13330 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13331 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13332 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13333 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13334 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13335 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13336 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13337 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13338 /* SIB */
13339 case 4:
13340 case 12:
13341 {
13342 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13343
13344 /* Get the index and scale it. */
13345 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13346 {
13347 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13348 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13349 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13350 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13351 case 4: u64EffAddr = 0; /*none */ break;
13352 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13353 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13354 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13355 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13356 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13357 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13358 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13359 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13360 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13361 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13362 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13363 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13364 }
13365 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13366
13367 /* add base */
13368 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13369 {
13370 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13371 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13372 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13373 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13374 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
13375 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13376 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13377 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13378 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13379 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13380 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13381 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13382 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13383 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13384 /* complicated encodings */
13385 case 5:
13386 case 13:
13387 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13388 {
13389 if (!pVCpu->iem.s.uRexB)
13390 {
13391 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13392 SET_SS_DEF();
13393 }
13394 else
13395 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13396 }
13397 else
13398 {
13399 uint32_t u32Disp;
13400 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13401 u64EffAddr += (int32_t)u32Disp;
13402 }
13403 break;
13404 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13405 }
13406 break;
13407 }
13408 IEM_NOT_REACHED_DEFAULT_CASE_RET();
13409 }
13410
13411 /* Get and add the displacement. */
13412 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13413 {
13414 case 0:
13415 break;
13416 case 1:
13417 {
13418 int8_t i8Disp;
13419 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13420 u64EffAddr += i8Disp;
13421 break;
13422 }
13423 case 2:
13424 {
13425 uint32_t u32Disp;
13426 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13427 u64EffAddr += (int32_t)u32Disp;
13428 break;
13429 }
13430 IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
13431 }
13432
13433 }
13434
13435 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13436 *pGCPtrEff = u64EffAddr;
13437 else
13438 {
13439 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13440 *pGCPtrEff = u64EffAddr & UINT32_MAX;
13441 }
13442 }
13443
13444    Log5(("iemOpHlpCalcRmEffAddrEx: EffAddr=%#010RGv\n", *pGCPtrEff));
13445 return VINF_SUCCESS;
13446}
13447
13448
13449#ifdef IEM_WITH_SETJMP
13450/**
13451 * Calculates the effective address of a ModR/M memory operand.
13452 *
13453 * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
13454 *
13455 * May longjmp on internal error.
13456 *
13457 * @return The effective address.
13458 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13459 * @param bRm The ModRM byte.
13460 * @param cbImm The size of any immediate following the
13461 * effective address opcode bytes. Important for
13462 * RIP relative addressing.
13463 */
13464IEM_STATIC RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm)
13465{
13466 Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
13467# define SET_SS_DEF() \
13468 do \
13469 { \
13470 if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
13471 pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
13472 } while (0)
13473
13474 if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
13475 {
13476/** @todo Check the effective address size crap! */
13477 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
13478 {
13479 uint16_t u16EffAddr;
13480
13481 /* Handle the disp16 form with no registers first. */
13482 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
13483 IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
13484 else
13485 {
13486                /* Get the displacement. */
13487 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13488 {
13489 case 0: u16EffAddr = 0; break;
13490 case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
13491 case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
13492 default: AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_1)); /* (caller checked for these) */
13493 }
13494
13495 /* Add the base and index registers to the disp. */
13496 switch (bRm & X86_MODRM_RM_MASK)
13497 {
13498 case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
13499 case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
13500 case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
13501 case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
13502 case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
13503 case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
13504 case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
13505 case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
13506 }
13507 }
13508
13509 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX16\n", u16EffAddr));
13510 return u16EffAddr;
13511 }
13512
13513 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13514 uint32_t u32EffAddr;
13515
13516 /* Handle the disp32 form with no registers first. */
13517 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13518 IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
13519 else
13520 {
13521 /* Get the register (or SIB) value. */
13522 switch ((bRm & X86_MODRM_RM_MASK))
13523 {
13524 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13525 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13526 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13527 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13528 case 4: /* SIB */
13529 {
13530 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13531
13532 /* Get the index and scale it. */
13533 switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
13534 {
13535 case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
13536 case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
13537 case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
13538 case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
13539 case 4: u32EffAddr = 0; /*none */ break;
13540 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
13541 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13542 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13543 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13544 }
13545 u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13546
13547 /* add base */
13548 switch (bSib & X86_SIB_BASE_MASK)
13549 {
13550 case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
13551 case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
13552 case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
13553 case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
13554 case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
13555 case 5:
13556 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13557 {
13558 u32EffAddr += pVCpu->cpum.GstCtx.ebp;
13559 SET_SS_DEF();
13560 }
13561 else
13562 {
13563 uint32_t u32Disp;
13564 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13565 u32EffAddr += u32Disp;
13566 }
13567 break;
13568 case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
13569 case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
13570 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13571 }
13572 break;
13573 }
13574 case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
13575 case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
13576 case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
13577 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13578 }
13579
13580 /* Get and add the displacement. */
13581 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13582 {
13583 case 0:
13584 break;
13585 case 1:
13586 {
13587 int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13588 u32EffAddr += i8Disp;
13589 break;
13590 }
13591 case 2:
13592 {
13593 uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13594 u32EffAddr += u32Disp;
13595 break;
13596 }
13597 default:
13598 AssertFailedStmt(longjmp(*pVCpu->iem.s.CTX_SUFF(pJmpBuf), VERR_IEM_IPE_2)); /* (caller checked for these) */
13599 }
13600 }
13601
13602 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
13603 {
13604 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RX32\n", u32EffAddr));
13605 return u32EffAddr;
13606 }
13607 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
13608 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#06RX32\n", u32EffAddr & UINT16_MAX));
13609 return u32EffAddr & UINT16_MAX;
13610 }
13611
13612 uint64_t u64EffAddr;
13613
13614 /* Handle the rip+disp32 form with no registers first. */
13615 if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
13616 {
13617 IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
13618 u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
13619 }
13620 else
13621 {
13622 /* Get the register (or SIB) value. */
13623 switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
13624 {
13625 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13626 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13627 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13628 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13629 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
13630 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13631 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13632 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13633 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13634 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13635 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13636 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13637 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13638 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13639 /* SIB */
13640 case 4:
13641 case 12:
13642 {
13643 uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
13644
13645 /* Get the index and scale it. */
13646 switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
13647 {
13648 case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
13649 case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
13650 case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
13651 case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
13652 case 4: u64EffAddr = 0; /*none */ break;
13653 case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
13654 case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
13655 case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
13656 case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
13657 case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
13658 case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
13659 case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
13660 case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
13661 case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
13662 case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
13663 case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
13664 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13665 }
13666 u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
13667
13668 /* add base */
13669 switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
13670 {
13671 case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
13672 case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
13673 case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
13674 case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
13675 case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
13676 case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
13677 case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
13678 case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
13679 case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
13680 case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
13681 case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
13682 case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
13683 case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
13684 case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
13685 /* complicated encodings */
13686 case 5:
13687 case 13:
13688 if ((bRm & X86_MODRM_MOD_MASK) != 0)
13689 {
13690 if (!pVCpu->iem.s.uRexB)
13691 {
13692 u64EffAddr += pVCpu->cpum.GstCtx.rbp;
13693 SET_SS_DEF();
13694 }
13695 else
13696 u64EffAddr += pVCpu->cpum.GstCtx.r13;
13697 }
13698 else
13699 {
13700 uint32_t u32Disp;
13701 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13702 u64EffAddr += (int32_t)u32Disp;
13703 }
13704 break;
13705 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13706 }
13707 break;
13708 }
13709 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX);
13710 }
13711
13712 /* Get and add the displacement. */
13713 switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
13714 {
13715 case 0:
13716 break;
13717 case 1:
13718 {
13719 int8_t i8Disp;
13720 IEM_OPCODE_GET_NEXT_S8(&i8Disp);
13721 u64EffAddr += i8Disp;
13722 break;
13723 }
13724 case 2:
13725 {
13726 uint32_t u32Disp;
13727 IEM_OPCODE_GET_NEXT_U32(&u32Disp);
13728 u64EffAddr += (int32_t)u32Disp;
13729 break;
13730 }
13731 IEM_NOT_REACHED_DEFAULT_CASE_RET2(RTGCPTR_MAX); /* (caller checked for these) */
13732 }
13733
13734 }
13735
13736 if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
13737 {
13738 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr));
13739 return u64EffAddr;
13740 }
13741 Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
13742 Log5(("iemOpHlpCalcRmEffAddrJmp: EffAddr=%#010RGv\n", u64EffAddr & UINT32_MAX));
13743 return u64EffAddr & UINT32_MAX;
13744}
13745#endif /* IEM_WITH_SETJMP */
13746
13747/** @} */
13748
13749
13750
13751/*
13752 * Include the instructions
13753 */
13754#include "IEMAllInstructions.cpp.h"
13755
13756
13757
13758#ifdef LOG_ENABLED
13759/**
13760 * Logs the current instruction.
13761 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13762 * @param fSameCtx Set if we have the same context information as the VMM,
13763 * clear if we may have already executed an instruction in
13764 * our debug context. When clear, we assume IEMCPU holds
13765 * valid CPU mode info.
13766 *
13767 * The @a fSameCtx parameter is now misleading and obsolete.
13768 * @param pszFunction The IEM function doing the execution.
13769 */
13770IEM_STATIC void iemLogCurInstr(PVMCPUCC pVCpu, bool fSameCtx, const char *pszFunction)
13771{
13772# ifdef IN_RING3
13773 if (LogIs2Enabled())
13774 {
13775 char szInstr[256];
13776 uint32_t cbInstr = 0;
13777 if (fSameCtx)
13778 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, 0, 0,
13779 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,
13780 szInstr, sizeof(szInstr), &cbInstr);
13781 else
13782 {
13783 uint32_t fFlags = 0;
13784 switch (pVCpu->iem.s.enmCpuMode)
13785 {
13786 case IEMMODE_64BIT: fFlags |= DBGF_DISAS_FLAGS_64BIT_MODE; break;
13787 case IEMMODE_32BIT: fFlags |= DBGF_DISAS_FLAGS_32BIT_MODE; break;
13788 case IEMMODE_16BIT:
13789 if (!(pVCpu->cpum.GstCtx.cr0 & X86_CR0_PE) || pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
13790 fFlags |= DBGF_DISAS_FLAGS_16BIT_REAL_MODE;
13791 else
13792 fFlags |= DBGF_DISAS_FLAGS_16BIT_MODE;
13793 break;
13794 }
13795 DBGFR3DisasInstrEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, fFlags,
13796 szInstr, sizeof(szInstr), &cbInstr);
13797 }
13798
13799 PCX86FXSTATE pFpuCtx = &pVCpu->cpum.GstCtx.XState.x87;
13800 Log2(("**** %s\n"
13801 " eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
13802 " eip=%08x esp=%08x ebp=%08x iopl=%d tr=%04x\n"
13803 " cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x efl=%08x\n"
13804 " fsw=%04x fcw=%04x ftw=%02x mxcsr=%04x/%04x\n"
13805 " %s\n"
13806 , pszFunction,
13807 pVCpu->cpum.GstCtx.eax, pVCpu->cpum.GstCtx.ebx, pVCpu->cpum.GstCtx.ecx, pVCpu->cpum.GstCtx.edx, pVCpu->cpum.GstCtx.esi, pVCpu->cpum.GstCtx.edi,
13808 pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.ebp, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, pVCpu->cpum.GstCtx.tr.Sel,
13809 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.ds.Sel, pVCpu->cpum.GstCtx.es.Sel,
13810 pVCpu->cpum.GstCtx.fs.Sel, pVCpu->cpum.GstCtx.gs.Sel, pVCpu->cpum.GstCtx.eflags.u,
13811 pFpuCtx->FSW, pFpuCtx->FCW, pFpuCtx->FTW, pFpuCtx->MXCSR, pFpuCtx->MXCSR_MASK,
13812 szInstr));
13813
13814 if (LogIs3Enabled())
13815 DBGFR3InfoEx(pVCpu->pVMR3->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);
13816 }
13817 else
13818# endif
13819 LogFlow(("%s: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x\n", pszFunction, pVCpu->cpum.GstCtx.cs.Sel,
13820 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u));
13821 RT_NOREF_PV(pVCpu); RT_NOREF_PV(fSameCtx);
13822}
13823#endif /* LOG_ENABLED */
13824
13825
13826#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13827/**
13828 * Deals with VMCPU_FF_VMX_APIC_WRITE, VMCPU_FF_VMX_MTF, VMCPU_FF_VMX_NMI_WINDOW,
13829 * VMCPU_FF_VMX_PREEMPT_TIMER and VMCPU_FF_VMX_INT_WINDOW.
13830 *
13831 * @returns Modified rcStrict.
13832 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13833 * @param rcStrict The instruction execution status.
13834 */
13835static VBOXSTRICTRC iemHandleNestedInstructionBoundraryFFs(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13836{
13837 Assert(CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)));
13838 if (!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF))
13839 {
13840 /* VMX preemption timer takes priority over NMI-window exits. */
13841 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
13842 {
13843 rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
13844 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER));
13845 }
13846 /*
13847 * Check remaining intercepts.
13848 *
13849 * NMI-window and Interrupt-window VM-exits.
13850 * Interrupt shadow (block-by-STI and Mov SS) inhibits interrupts and may also block NMIs.
13851 * Event injection during VM-entry takes priority over NMI-window and interrupt-window VM-exits.
13852 *
13853 * See Intel spec. 26.7.6 "NMI-Window Exiting".
13854 * See Intel spec. 26.7.5 "Interrupt-Window Exiting and Virtual-Interrupt Delivery".
13855 */
13856 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW)
13857 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
13858 && !TRPMHasTrap(pVCpu))
13859 {
13860 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
13861 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
13862 && CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
13863 {
13864 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* u64ExitQual */);
13865 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW));
13866 }
13867 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
13868 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
13869 {
13870 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* u64ExitQual */);
13871 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW));
13872 }
13873 }
13874 }
13875 /* TPR-below threshold/APIC write has the highest priority. */
13876 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
13877 {
13878 rcStrict = iemVmxApicWriteEmulation(pVCpu);
13879 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13880 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE));
13881 }
13882 /* MTF takes priority over VMX-preemption timer. */
13883 else
13884 {
13885 rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* u64ExitQual */);
13886 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
13887 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF));
13888 }
13889 return rcStrict;
13890}
13891#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
13892
13893
13894/**
13895 * Makes status code adjustments (pass up from I/O and access handlers)
13896 * as well as maintaining statistics.
13897 *
13898 * @returns Strict VBox status code to pass up.
13899 * @param pVCpu The cross context virtual CPU structure of the calling thread.
13900 * @param rcStrict The status from executing an instruction.
13901 */
13902DECL_FORCE_INLINE(VBOXSTRICTRC) iemExecStatusCodeFiddling(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
13903{
13904 if (rcStrict != VINF_SUCCESS)
13905 {
13906 if (RT_SUCCESS(rcStrict))
13907 {
13908 AssertMsg( (rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST)
13909 || rcStrict == VINF_IOM_R3_IOPORT_READ
13910 || rcStrict == VINF_IOM_R3_IOPORT_WRITE
13911 || rcStrict == VINF_IOM_R3_IOPORT_COMMIT_WRITE
13912 || rcStrict == VINF_IOM_R3_MMIO_READ
13913 || rcStrict == VINF_IOM_R3_MMIO_READ_WRITE
13914 || rcStrict == VINF_IOM_R3_MMIO_WRITE
13915 || rcStrict == VINF_IOM_R3_MMIO_COMMIT_WRITE
13916 || rcStrict == VINF_CPUM_R3_MSR_READ
13917 || rcStrict == VINF_CPUM_R3_MSR_WRITE
13918 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
13919 || rcStrict == VINF_EM_RAW_TO_R3
13920 || rcStrict == VINF_EM_TRIPLE_FAULT
13921 || rcStrict == VINF_GIM_R3_HYPERCALL
13922 /* raw-mode / virt handlers only: */
13923 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT
13924 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT
13925 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
13926 || rcStrict == VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT
13927 || rcStrict == VINF_SELM_SYNC_GDT
13928 || rcStrict == VINF_CSAM_PENDING_ACTION
13929 || rcStrict == VINF_PATM_CHECK_PATCH_PAGE
13930 /* nested hw.virt codes: */
13931 || rcStrict == VINF_VMX_VMEXIT
13932 || rcStrict == VINF_VMX_INTERCEPT_NOT_ACTIVE
13933 || rcStrict == VINF_VMX_MODIFIES_BEHAVIOR
13934 || rcStrict == VINF_SVM_VMEXIT
13935 , ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
13936/** @todo adjust for VINF_EM_RAW_EMULATE_INSTR. */
13937 int32_t const rcPassUp = pVCpu->iem.s.rcPassUp;
13938#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
13939 if ( rcStrict == VINF_VMX_VMEXIT
13940 && rcPassUp == VINF_SUCCESS)
13941 rcStrict = VINF_SUCCESS;
13942 else
13943#endif
13944#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
13945 if ( rcStrict == VINF_SVM_VMEXIT
13946 && rcPassUp == VINF_SUCCESS)
13947 rcStrict = VINF_SUCCESS;
13948 else
13949#endif
13950 if (rcPassUp == VINF_SUCCESS)
13951 pVCpu->iem.s.cRetInfStatuses++;
13952 else if ( rcPassUp < VINF_EM_FIRST
13953 || rcPassUp > VINF_EM_LAST
13954 || rcPassUp < VBOXSTRICTRC_VAL(rcStrict))
13955 {
13956 Log(("IEM: rcPassUp=%Rrc! rcStrict=%Rrc\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13957 pVCpu->iem.s.cRetPassUpStatus++;
13958 rcStrict = rcPassUp;
13959 }
13960 else
13961 {
13962 Log(("IEM: rcPassUp=%Rrc rcStrict=%Rrc!\n", rcPassUp, VBOXSTRICTRC_VAL(rcStrict)));
13963 pVCpu->iem.s.cRetInfStatuses++;
13964 }
13965 }
13966 else if (rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED)
13967 pVCpu->iem.s.cRetAspectNotImplemented++;
13968 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
13969 pVCpu->iem.s.cRetInstrNotImplemented++;
13970 else
13971 pVCpu->iem.s.cRetErrStatuses++;
13972 }
13973 else if (pVCpu->iem.s.rcPassUp != VINF_SUCCESS)
13974 {
13975 pVCpu->iem.s.cRetPassUpStatus++;
13976 rcStrict = pVCpu->iem.s.rcPassUp;
13977 }
13978
13979 return rcStrict;
13980}
13981
13982
13983/**
13984 * The actual code execution bits of IEMExecOne, IEMExecOneEx, and
13985 * IEMExecOneWithPrefetchedByPC.
13986 *
13987 * Similar code is found in IEMExecLots.
13988 *
13989 * @return Strict VBox status code.
13990 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
13991 * @param fExecuteInhibit If set, execute the instruction following CLI,
13992 * POP SS and MOV SS,GR.
13993 * @param pszFunction The calling function name.
13994 */
13995DECLINLINE(VBOXSTRICTRC) iemExecOneInner(PVMCPUCC pVCpu, bool fExecuteInhibit, const char *pszFunction)
13996{
13997 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
13998 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
13999 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14000 RT_NOREF_PV(pszFunction);
14001
14002#ifdef IEM_WITH_SETJMP
14003 VBOXSTRICTRC rcStrict;
14004 jmp_buf JmpBuf;
14005 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14006 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14007 if ((rcStrict = setjmp(JmpBuf)) == 0)
14008 {
14009 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14010 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14011 }
14012 else
14013 pVCpu->iem.s.cLongJumps++;
14014 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14015#else
14016 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14017 VBOXSTRICTRC rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14018#endif
14019 if (rcStrict == VINF_SUCCESS)
14020 pVCpu->iem.s.cInstructions++;
14021 if (pVCpu->iem.s.cActiveMappings > 0)
14022 {
14023 Assert(rcStrict != VINF_SUCCESS);
14024 iemMemRollback(pVCpu);
14025 }
14026 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14027 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14028 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14029
14030//#ifdef DEBUG
14031// AssertMsg(IEM_GET_INSTR_LEN(pVCpu) == cbInstr || rcStrict != VINF_SUCCESS, ("%u %u\n", IEM_GET_INSTR_LEN(pVCpu), cbInstr));
14032//#endif
14033
14034#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14035 /*
14036 * Perform any VMX nested-guest instruction boundary actions.
14037 *
14038 * If any of these causes a VM-exit, we must skip executing the next
14039 * instruction (would run into stale page tables). A VM-exit makes sure
14040 * there is no interrupt-inhibition, so that should ensure we don't go
14041 * to try execute the next instruction. Clearing fExecuteInhibit is
14042 * problematic because of the setjmp/longjmp clobbering above.
14043 */
14044 if ( rcStrict == VINF_SUCCESS
14045 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
14046 | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW))
14047 rcStrict = iemHandleNestedInstructionBoundraryFFs(pVCpu, rcStrict);
14048#endif
14049
14050 /* Execute the next instruction as well if a cli, pop ss or
14051 mov ss, Gr has just completed successfully. */
14052 if ( fExecuteInhibit
14053 && rcStrict == VINF_SUCCESS
14054 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
14055 && EMIsInhibitInterruptsActive(pVCpu))
14056 {
14057 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, pVCpu->iem.s.fBypassHandlers, pVCpu->iem.s.fDisregardLock);
14058 if (rcStrict == VINF_SUCCESS)
14059 {
14060#ifdef LOG_ENABLED
14061 iemLogCurInstr(pVCpu, false, pszFunction);
14062#endif
14063#ifdef IEM_WITH_SETJMP
14064 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14065 if ((rcStrict = setjmp(JmpBuf)) == 0)
14066 {
14067 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14068 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14069 }
14070 else
14071 pVCpu->iem.s.cLongJumps++;
14072 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14073#else
14074 IEM_OPCODE_GET_NEXT_U8(&b);
14075 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14076#endif
14077 if (rcStrict == VINF_SUCCESS)
14078 pVCpu->iem.s.cInstructions++;
14079 if (pVCpu->iem.s.cActiveMappings > 0)
14080 {
14081 Assert(rcStrict != VINF_SUCCESS);
14082 iemMemRollback(pVCpu);
14083 }
14084 AssertMsg(pVCpu->iem.s.aMemMappings[0].fAccess == IEM_ACCESS_INVALID, ("0: %#x %RGp\n", pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemBbMappings[0].GCPhysFirst));
14085 AssertMsg(pVCpu->iem.s.aMemMappings[1].fAccess == IEM_ACCESS_INVALID, ("1: %#x %RGp\n", pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemBbMappings[1].GCPhysFirst));
14086 AssertMsg(pVCpu->iem.s.aMemMappings[2].fAccess == IEM_ACCESS_INVALID, ("2: %#x %RGp\n", pVCpu->iem.s.aMemMappings[2].fAccess, pVCpu->iem.s.aMemBbMappings[2].GCPhysFirst));
14087 }
14088 else if (pVCpu->iem.s.cActiveMappings > 0)
14089 iemMemRollback(pVCpu);
14090 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); /* hope this is correct for all exceptional cases... */
14091 }
14092
14093 /*
14094 * Return value fiddling, statistics and sanity assertions.
14095 */
14096 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14097
14098 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14099 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14100 return rcStrict;
14101}
14102
14103
14104/**
14105 * Execute one instruction.
14106 *
14107 * @return Strict VBox status code.
14108 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14109 */
14110VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPUCC pVCpu)
14111{
14112    AssertCompile(sizeof(pVCpu->iem.s) <= sizeof(pVCpu->iem.padding)); /* (tstVMStruct can't do its job w/o instruction stats) */
14113#ifdef LOG_ENABLED
14114 iemLogCurInstr(pVCpu, true, "IEMExecOne");
14115#endif
14116
14117 /*
14118 * Do the decoding and emulation.
14119 */
14120 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14121 if (rcStrict == VINF_SUCCESS)
14122 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOne");
14123 else if (pVCpu->iem.s.cActiveMappings > 0)
14124 iemMemRollback(pVCpu);
14125
14126 if (rcStrict != VINF_SUCCESS)
14127 LogFlow(("IEMExecOne: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14128 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14129 return rcStrict;
14130}
14131
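/*
 * Minimal usage sketch (illustrative only, not part of the build): emulating a
 * single guest instruction from the EMT that owns pVCpu.  What a real caller
 * does with the informational statuses is up to it; only the general shape is
 * shown here.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu);
 *      if (rcStrict == VINF_SUCCESS)
 *          ...                     - instruction emulated, guest state advanced.
 *      else if (RT_SUCCESS(rcStrict))
 *          ...                     - informational status to pass up (I/O, MMIO, MSR, VM-exit, etc.).
 *      else
 *          ...                     - error or not-implemented status.
 */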
14132
14133VMMDECL(VBOXSTRICTRC) IEMExecOneEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14134{
14135 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14136
14137 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14138 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14139 if (rcStrict == VINF_SUCCESS)
14140 {
14141 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneEx");
14142 if (pcbWritten)
14143 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14144 }
14145 else if (pVCpu->iem.s.cActiveMappings > 0)
14146 iemMemRollback(pVCpu);
14147
14148 return rcStrict;
14149}
14150
14151
14152VMMDECL(VBOXSTRICTRC) IEMExecOneWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14153 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14154{
14155 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14156
14157 VBOXSTRICTRC rcStrict;
14158 if ( cbOpcodeBytes
14159 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14160 {
14161 iemInitDecoder(pVCpu, false, false);
14162#ifdef IEM_WITH_CODE_TLB
14163 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14164 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14165 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14166 pVCpu->iem.s.offCurInstrStart = 0;
14167 pVCpu->iem.s.offInstrNextByte = 0;
14168#else
14169 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14170 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14171#endif
14172 rcStrict = VINF_SUCCESS;
14173 }
14174 else
14175 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14176 if (rcStrict == VINF_SUCCESS)
14177 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneWithPrefetchedByPC");
14178 else if (pVCpu->iem.s.cActiveMappings > 0)
14179 iemMemRollback(pVCpu);
14180
14181 return rcStrict;
14182}
14183
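/*
 * Usage sketch (illustrative only, not part of the build): executing one
 * instruction when the caller already holds the opcode bytes, so the initial
 * prefetch can be skipped as long as the guest RIP still matches
 * OpcodeBytesPC.  The abOpcodes/cbOpcodes names are assumptions made up for
 * this example.
 *
 *      VBOXSTRICTRC rcStrict = IEMExecOneWithPrefetchedByPC(pVCpu,
 *                                                           CPUMCTX2CORE(&pVCpu->cpum.GstCtx),
 *                                                           pVCpu->cpum.GstCtx.rip,
 *                                                           abOpcodes, cbOpcodes);
 */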
14184
14185VMMDECL(VBOXSTRICTRC) IEMExecOneBypassEx(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint32_t *pcbWritten)
14186{
14187 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14188
14189 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14190 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14191 if (rcStrict == VINF_SUCCESS)
14192 {
14193 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassEx");
14194 if (pcbWritten)
14195 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14196 }
14197 else if (pVCpu->iem.s.cActiveMappings > 0)
14198 iemMemRollback(pVCpu);
14199
14200 return rcStrict;
14201}
14202
14203
14204VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPC(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14205 const void *pvOpcodeBytes, size_t cbOpcodeBytes)
14206{
14207 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14208
14209 VBOXSTRICTRC rcStrict;
14210 if ( cbOpcodeBytes
14211 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14212 {
14213 iemInitDecoder(pVCpu, true, false);
14214#ifdef IEM_WITH_CODE_TLB
14215 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14216 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14217 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14218 pVCpu->iem.s.offCurInstrStart = 0;
14219 pVCpu->iem.s.offInstrNextByte = 0;
14220#else
14221 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14222 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14223#endif
14224 rcStrict = VINF_SUCCESS;
14225 }
14226 else
14227 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14228 if (rcStrict == VINF_SUCCESS)
14229 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPC");
14230 else if (pVCpu->iem.s.cActiveMappings > 0)
14231 iemMemRollback(pVCpu);
14232
14233 return rcStrict;
14234}
14235
14236
14237/**
14238 * For debugging DISGetParamSize, may come in handy.
14239 *
14240 * @returns Strict VBox status code.
14241 * @param pVCpu The cross context virtual CPU structure of the
14242 * calling EMT.
14243 * @param pCtxCore The context core structure.
14244 * @param OpcodeBytesPC The PC of the opcode bytes.
14245 * @param pvOpcodeBytes Prefetched opcode bytes.
14246 * @param cbOpcodeBytes Number of prefetched bytes.
14247 * @param pcbWritten Where to return the number of bytes written.
14248 * Optional.
14249 */
14250VMMDECL(VBOXSTRICTRC) IEMExecOneBypassWithPrefetchedByPCWritten(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore, uint64_t OpcodeBytesPC,
14251 const void *pvOpcodeBytes, size_t cbOpcodeBytes,
14252 uint32_t *pcbWritten)
14253{
14254 AssertReturn(CPUMCTX2CORE(IEM_GET_CTX(pVCpu)) == pCtxCore, VERR_IEM_IPE_3);
14255
14256 uint32_t const cbOldWritten = pVCpu->iem.s.cbWritten;
14257 VBOXSTRICTRC rcStrict;
14258 if ( cbOpcodeBytes
14259 && pVCpu->cpum.GstCtx.rip == OpcodeBytesPC)
14260 {
14261 iemInitDecoder(pVCpu, true, false);
14262#ifdef IEM_WITH_CODE_TLB
14263 pVCpu->iem.s.uInstrBufPc = OpcodeBytesPC;
14264 pVCpu->iem.s.pbInstrBuf = (uint8_t const *)pvOpcodeBytes;
14265 pVCpu->iem.s.cbInstrBufTotal = (uint16_t)RT_MIN(X86_PAGE_SIZE, cbOpcodeBytes);
14266 pVCpu->iem.s.offCurInstrStart = 0;
14267 pVCpu->iem.s.offInstrNextByte = 0;
14268#else
14269 pVCpu->iem.s.cbOpcode = (uint8_t)RT_MIN(cbOpcodeBytes, sizeof(pVCpu->iem.s.abOpcode));
14270 memcpy(pVCpu->iem.s.abOpcode, pvOpcodeBytes, pVCpu->iem.s.cbOpcode);
14271#endif
14272 rcStrict = VINF_SUCCESS;
14273 }
14274 else
14275 rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, true, false);
14276 if (rcStrict == VINF_SUCCESS)
14277 {
14278 rcStrict = iemExecOneInner(pVCpu, false, "IEMExecOneBypassWithPrefetchedByPCWritten");
14279 if (pcbWritten)
14280 *pcbWritten = pVCpu->iem.s.cbWritten - cbOldWritten;
14281 }
14282 else if (pVCpu->iem.s.cActiveMappings > 0)
14283 iemMemRollback(pVCpu);
14284
14285 return rcStrict;
14286}
14287
14288
14289/**
14290 * For handling split cacheline lock operations when the host has split-lock
14291 * detection enabled.
14292 *
14293 * This will cause the interpreter to disregard the lock prefix and implicit
14294 * locking (xchg).
14295 *
14296 * @returns Strict VBox status code.
14297 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14298 */
14299VMMDECL(VBOXSTRICTRC) IEMExecOneIgnoreLock(PVMCPUCC pVCpu)
14300{
14301 /*
14302 * Do the decoding and emulation.
14303 */
14304 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, true /*fDisregardLock*/);
14305 if (rcStrict == VINF_SUCCESS)
14306 rcStrict = iemExecOneInner(pVCpu, true, "IEMExecOneIgnoreLock");
14307 else if (pVCpu->iem.s.cActiveMappings > 0)
14308 iemMemRollback(pVCpu);
14309
14310 if (rcStrict != VINF_SUCCESS)
14311 LogFlow(("IEMExecOneIgnoreLock: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14312 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14313 return rcStrict;
14314}
14315
14316
14317VMMDECL(VBOXSTRICTRC) IEMExecLots(PVMCPUCC pVCpu, uint32_t cMaxInstructions, uint32_t cPollRate, uint32_t *pcInstructions)
14318{
14319 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions;
14320 AssertMsg(RT_IS_POWER_OF_TWO(cPollRate + 1), ("%#x\n", cPollRate));
14321
14322 /*
14323 * See if there is an interrupt pending in TRPM, inject it if we can.
14324 */
14325 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
14326#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14327 bool fIntrEnabled = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
14328 if (fIntrEnabled)
14329 {
14330 if (!CPUMIsGuestInNestedHwvirtMode(IEM_GET_CTX(pVCpu)))
14331 fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14332 else if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
14333 fIntrEnabled = CPUMIsGuestVmxPhysIntrEnabled(IEM_GET_CTX(pVCpu));
14334 else
14335 {
14336 Assert(CPUMIsGuestInSvmNestedHwVirtMode(IEM_GET_CTX(pVCpu)));
14337 fIntrEnabled = CPUMIsGuestSvmPhysIntrEnabled(pVCpu, IEM_GET_CTX(pVCpu));
14338 }
14339 }
14340#else
14341 bool fIntrEnabled = pVCpu->cpum.GstCtx.eflags.Bits.u1IF;
14342#endif
14343
14344 /** @todo What if we are injecting an exception and not an interrupt? Is that
14345 * possible here? For now we assert it is indeed only an interrupt. */
14346 if ( fIntrEnabled
14347 && TRPMHasTrap(pVCpu)
14348 && EMGetInhibitInterruptsPC(pVCpu) != pVCpu->cpum.GstCtx.rip)
14349 {
14350 uint8_t u8TrapNo;
14351 TRPMEVENT enmType;
14352 uint32_t uErrCode;
14353 RTGCPTR uCr2;
14354 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */, NULL /* fIcebp */);
14355 AssertRC(rc2);
14356 Assert(enmType == TRPM_HARDWARE_INT);
14357 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
14358 TRPMResetTrap(pVCpu);
14359#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14360 /* Injecting an event may cause a VM-exit. */
14361 if ( rcStrict != VINF_SUCCESS
14362 && rcStrict != VINF_IEM_RAISED_XCPT)
14363 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14364#else
14365 NOREF(rcStrict);
14366#endif
14367 }
14368
14369 /*
14370 * Initial decoder init w/ prefetch, then setup setjmp.
14371 */
14372 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14373 if (rcStrict == VINF_SUCCESS)
14374 {
14375#ifdef IEM_WITH_SETJMP
14376 jmp_buf JmpBuf;
14377 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14378 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14379 pVCpu->iem.s.cActiveMappings = 0;
14380 if ((rcStrict = setjmp(JmpBuf)) == 0)
14381#endif
14382 {
14383 /*
14384 * The run loop. We limit ourselves to 4096 instructions right now.
14385 */
14386 uint32_t cMaxInstructionsGccStupidity = cMaxInstructions;
14387 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
14388 for (;;)
14389 {
14390 /*
14391 * Log the state.
14392 */
14393#ifdef LOG_ENABLED
14394 iemLogCurInstr(pVCpu, true, "IEMExecLots");
14395#endif
14396
14397 /*
14398 * Do the decoding and emulation.
14399 */
14400 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14401 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14402 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14403 {
14404 Assert(pVCpu->iem.s.cActiveMappings == 0);
14405 pVCpu->iem.s.cInstructions++;
14406 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14407 {
14408 uint64_t fCpu = pVCpu->fLocalForcedActions
14409 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14410 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14411 | VMCPU_FF_TLB_FLUSH
14412 | VMCPU_FF_INHIBIT_INTERRUPTS
14413 | VMCPU_FF_BLOCK_NMIS
14414 | VMCPU_FF_UNHALT ));
14415
14416 if (RT_LIKELY( ( !fCpu
14417 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14418 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF) )
14419 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) ))
14420 {
14421 if (cMaxInstructionsGccStupidity-- > 0)
14422 {
14423                            /* Poll timers every now and then according to the caller's specs. */
14424 if ( (cMaxInstructionsGccStupidity & cPollRate) != 0
14425 || !TMTimerPollBool(pVM, pVCpu))
14426 {
14427 Assert(pVCpu->iem.s.cActiveMappings == 0);
14428 iemReInitDecoder(pVCpu);
14429 continue;
14430 }
14431 }
14432 }
14433 }
14434 Assert(pVCpu->iem.s.cActiveMappings == 0);
14435 }
14436 else if (pVCpu->iem.s.cActiveMappings > 0)
14437 iemMemRollback(pVCpu);
14438 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14439 break;
14440 }
14441 }
14442#ifdef IEM_WITH_SETJMP
14443 else
14444 {
14445 if (pVCpu->iem.s.cActiveMappings > 0)
14446 iemMemRollback(pVCpu);
14447# if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14448 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14449# endif
14450 pVCpu->iem.s.cLongJumps++;
14451 }
14452 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14453#endif
14454
14455 /*
14456 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14457 */
14458 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14459 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14460 }
14461 else
14462 {
14463 if (pVCpu->iem.s.cActiveMappings > 0)
14464 iemMemRollback(pVCpu);
14465
14466#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14467 /*
14468 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching
14469 * code as part of instruction execution, we need this to fix-up VINF_SVM_VMEXIT.
14470 */
14471 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14472#endif
14473 }
14474
14475 /*
14476 * Maybe re-enter raw-mode and log.
14477 */
14478 if (rcStrict != VINF_SUCCESS)
14479 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14480 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14481 if (pcInstructions)
14482 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart;
14483 return rcStrict;
14484}
14485
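/*
 * Usage sketch (illustrative only, not part of the build): letting IEM chew
 * through a burst of guest instructions, polling timers roughly every 32
 * instructions.  The assertion at the top of IEMExecLots expects cPollRate + 1
 * to be a power of two, hence the mask-friendly value 31; 4096 for
 * cMaxInstructions is just an example figure.
 *
 *      uint32_t     cInstructions = 0;
 *      VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 4096, 31, &cInstructions);
 *      ...                     - cInstructions now holds the number of
 *                                instructions executed by this call.
 */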
14486
14487/**
14488 * Interface used by EMExecuteExec, does exit statistics and limits.
14489 *
14490 * @returns Strict VBox status code.
14491 * @param pVCpu The cross context virtual CPU structure.
14492 * @param fWillExit To be defined.
14493 * @param cMinInstructions Minimum number of instructions to execute before checking for FFs.
14494 * @param cMaxInstructions Maximum number of instructions to execute.
14495 * @param cMaxInstructionsWithoutExits
14496 * The max number of instructions without exits.
14497 * @param pStats Where to return statistics.
14498 */
14499VMMDECL(VBOXSTRICTRC) IEMExecForExits(PVMCPUCC pVCpu, uint32_t fWillExit, uint32_t cMinInstructions, uint32_t cMaxInstructions,
14500 uint32_t cMaxInstructionsWithoutExits, PIEMEXECFOREXITSTATS pStats)
14501{
14502 NOREF(fWillExit); /** @todo define flexible exit crits */
14503
14504 /*
14505 * Initialize return stats.
14506 */
14507 pStats->cInstructions = 0;
14508 pStats->cExits = 0;
14509 pStats->cMaxExitDistance = 0;
14510 pStats->cReserved = 0;
14511
14512 /*
14513 * Initial decoder init w/ prefetch, then setup setjmp.
14514 */
14515 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false, false);
14516 if (rcStrict == VINF_SUCCESS)
14517 {
14518#ifdef IEM_WITH_SETJMP
14519 jmp_buf JmpBuf;
14520 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf);
14521 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf;
14522 pVCpu->iem.s.cActiveMappings = 0;
14523 if ((rcStrict = setjmp(JmpBuf)) == 0)
14524#endif
14525 {
14526#ifdef IN_RING0
14527 bool const fCheckPreemptionPending = !RTThreadPreemptIsPossible() || !RTThreadPreemptIsEnabled(NIL_RTTHREAD);
14528#endif
14529 uint32_t cInstructionSinceLastExit = 0;
14530
14531 /*
14532 * The run loop. We limit ourselves to 4096 instructions right now.
14533 */
14534 PVM pVM = pVCpu->CTX_SUFF(pVM);
14535 for (;;)
14536 {
14537 /*
14538 * Log the state.
14539 */
14540#ifdef LOG_ENABLED
14541 iemLogCurInstr(pVCpu, true, "IEMExecForExits");
14542#endif
14543
14544 /*
14545 * Do the decoding and emulation.
14546 */
14547 uint32_t const cPotentialExits = pVCpu->iem.s.cPotentialExits;
14548
14549 uint8_t b; IEM_OPCODE_GET_NEXT_U8(&b);
14550 rcStrict = FNIEMOP_CALL(g_apfnOneByteMap[b]);
14551
14552 if ( cPotentialExits != pVCpu->iem.s.cPotentialExits
14553 && cInstructionSinceLastExit > 0 /* don't count the first */ )
14554 {
14555 pStats->cExits += 1;
14556 if (cInstructionSinceLastExit > pStats->cMaxExitDistance)
14557 pStats->cMaxExitDistance = cInstructionSinceLastExit;
14558 cInstructionSinceLastExit = 0;
14559 }
14560
14561 if (RT_LIKELY(rcStrict == VINF_SUCCESS))
14562 {
14563 Assert(pVCpu->iem.s.cActiveMappings == 0);
14564 pVCpu->iem.s.cInstructions++;
14565 pStats->cInstructions++;
14566 cInstructionSinceLastExit++;
14567 if (RT_LIKELY(pVCpu->iem.s.rcPassUp == VINF_SUCCESS))
14568 {
14569 uint64_t fCpu = pVCpu->fLocalForcedActions
14570 & ( VMCPU_FF_ALL_MASK & ~( VMCPU_FF_PGM_SYNC_CR3
14571 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL
14572 | VMCPU_FF_TLB_FLUSH
14573 | VMCPU_FF_INHIBIT_INTERRUPTS
14574 | VMCPU_FF_BLOCK_NMIS
14575 | VMCPU_FF_UNHALT ));
14576
14577 if (RT_LIKELY( ( ( !fCpu
14578 || ( !(fCpu & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
14579 && !pVCpu->cpum.GstCtx.rflags.Bits.u1IF))
14580 && !VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_MASK) )
14581 || pStats->cInstructions < cMinInstructions))
14582 {
14583 if (pStats->cInstructions < cMaxInstructions)
14584 {
14585 if (cInstructionSinceLastExit <= cMaxInstructionsWithoutExits)
14586 {
14587#ifdef IN_RING0
14588 if ( !fCheckPreemptionPending
14589 || !RTThreadPreemptIsPending(NIL_RTTHREAD))
14590#endif
14591 {
14592 Assert(pVCpu->iem.s.cActiveMappings == 0);
14593 iemReInitDecoder(pVCpu);
14594 continue;
14595 }
14596#ifdef IN_RING0
14597 rcStrict = VINF_EM_RAW_INTERRUPT;
14598 break;
14599#endif
14600 }
14601 }
14602 }
14603 Assert(!(fCpu & VMCPU_FF_IEM));
14604 }
14605 Assert(pVCpu->iem.s.cActiveMappings == 0);
14606 }
14607 else if (pVCpu->iem.s.cActiveMappings > 0)
14608 iemMemRollback(pVCpu);
14609 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14610 break;
14611 }
14612 }
14613#ifdef IEM_WITH_SETJMP
14614 else
14615 {
14616 if (pVCpu->iem.s.cActiveMappings > 0)
14617 iemMemRollback(pVCpu);
14618 pVCpu->iem.s.cLongJumps++;
14619 }
14620 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf;
14621#endif
14622
14623 /*
14624 * Assert hidden register sanity (also done in iemInitDecoder and iemReInitDecoder).
14625 */
14626 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.cs));
14627 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pVCpu->cpum.GstCtx.ss));
14628 }
14629 else
14630 {
14631 if (pVCpu->iem.s.cActiveMappings > 0)
14632 iemMemRollback(pVCpu);
14633
14634#if defined(VBOX_WITH_NESTED_HWVIRT_SVM) || defined(VBOX_WITH_NESTED_HWVIRT_VMX)
14635 /*
14636 * When a nested-guest causes an exception intercept (e.g. \#PF) while fetching
14637 * code as part of instruction execution, we need this to fix up VINF_SVM_VMEXIT.
14638 */
14639 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict);
14640#endif
14641 }
14642
14643 /*
14644 * Maybe re-enter raw-mode and log.
14645 */
14646 if (rcStrict != VINF_SUCCESS)
14647 LogFlow(("IEMExecForExits: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc; ins=%u exits=%u maxdist=%u\n",
14648 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss.Sel, pVCpu->cpum.GstCtx.rsp,
14649 pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict), pStats->cInstructions, pStats->cExits, pStats->cMaxExitDistance));
14650 return rcStrict;
14651}
14652
14653
14654/**
14655 * Injects a trap, fault, abort, software interrupt or external interrupt.
14656 *
14657 * The parameter list matches TRPMQueryTrapAll pretty closely.
14658 *
14659 * @returns Strict VBox status code.
14660 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14661 * @param u8TrapNo The trap number.
14662 * @param enmType What type is it (trap/fault/abort), software
14663 * interrupt or hardware interrupt.
14664 * @param uErrCode The error code if applicable.
14665 * @param uCr2 The CR2 value if applicable.
14666 * @param cbInstr The instruction length (only relevant for
14667 * software interrupts).
14668 */
14669VMM_INT_DECL(VBOXSTRICTRC) IEMInjectTrap(PVMCPUCC pVCpu, uint8_t u8TrapNo, TRPMEVENT enmType, uint16_t uErrCode, RTGCPTR uCr2,
14670 uint8_t cbInstr)
14671{
14672 iemInitDecoder(pVCpu, false, false);
14673#ifdef DBGFTRACE_ENABLED
14674 RTTraceBufAddMsgF(pVCpu->CTX_SUFF(pVM)->CTX_SUFF(hTraceBuf), "IEMInjectTrap: %x %d %x %llx",
14675 u8TrapNo, enmType, uErrCode, uCr2);
14676#endif
14677
14678 uint32_t fFlags;
14679 switch (enmType)
14680 {
14681 case TRPM_HARDWARE_INT:
14682 Log(("IEMInjectTrap: %#4x ext\n", u8TrapNo));
14683 fFlags = IEM_XCPT_FLAGS_T_EXT_INT;
14684 uErrCode = uCr2 = 0;
14685 break;
14686
14687 case TRPM_SOFTWARE_INT:
14688 Log(("IEMInjectTrap: %#4x soft\n", u8TrapNo));
14689 fFlags = IEM_XCPT_FLAGS_T_SOFT_INT;
14690 uErrCode = uCr2 = 0;
14691 break;
14692
14693 case TRPM_TRAP:
14694 Log(("IEMInjectTrap: %#4x trap err=%#x cr2=%#RGv\n", u8TrapNo, uErrCode, uCr2));
14695 fFlags = IEM_XCPT_FLAGS_T_CPU_XCPT;
14696 if (u8TrapNo == X86_XCPT_PF)
14697 fFlags |= IEM_XCPT_FLAGS_CR2;
14698 switch (u8TrapNo)
14699 {
14700 case X86_XCPT_DF:
14701 case X86_XCPT_TS:
14702 case X86_XCPT_NP:
14703 case X86_XCPT_SS:
14704 case X86_XCPT_PF:
14705 case X86_XCPT_AC:
14706 case X86_XCPT_GP:
14707 fFlags |= IEM_XCPT_FLAGS_ERR;
14708 break;
14709 }
14710 break;
14711
14712 IEM_NOT_REACHED_DEFAULT_CASE_RET();
14713 }
14714
14715 VBOXSTRICTRC rcStrict = iemRaiseXcptOrInt(pVCpu, cbInstr, u8TrapNo, fFlags, uErrCode, uCr2);
14716
14717 if (pVCpu->iem.s.cActiveMappings > 0)
14718 iemMemRollback(pVCpu);
14719
14720 return rcStrict;
14721}
14722
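/*
 * A minimal usage sketch (hedged; uPfErrCode and GCPtrFault stand in for values a
 * hypothetical caller would supply): injecting a guest page fault would look
 * roughly like
 *      rcStrict = IEMInjectTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP, uPfErrCode, GCPtrFault, 0);
 * The final cbInstr argument only matters for software interrupts (see above).
 */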
14723
14724/**
14725 * Injects the active TRPM event.
14726 *
14727 * @returns Strict VBox status code.
14728 * @param pVCpu The cross context virtual CPU structure.
14729 */
14730VMMDECL(VBOXSTRICTRC) IEMInjectTrpmEvent(PVMCPUCC pVCpu)
14731{
14732#ifndef IEM_IMPLEMENTS_TASKSWITCH
14733 IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(("Event injection\n"));
14734#else
14735 uint8_t u8TrapNo;
14736 TRPMEVENT enmType;
14737 uint32_t uErrCode;
14738 RTGCUINTPTR uCr2;
14739 uint8_t cbInstr;
14740 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, &cbInstr, NULL /* fIcebp */);
14741 if (RT_FAILURE(rc))
14742 return rc;
14743
14744 /** @todo r=ramshankar: Pass ICEBP info. to IEMInjectTrap() below and handle
14745 * ICEBP \#DB injection as a special case. */
14746 VBOXSTRICTRC rcStrict = IEMInjectTrap(pVCpu, u8TrapNo, enmType, uErrCode, uCr2, cbInstr);
14747#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
14748 if (rcStrict == VINF_SVM_VMEXIT)
14749 rcStrict = VINF_SUCCESS;
14750#endif
14751#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
14752 if (rcStrict == VINF_VMX_VMEXIT)
14753 rcStrict = VINF_SUCCESS;
14754#endif
14755 /** @todo Are there any other codes that imply the event was successfully
14756 * delivered to the guest? See @bugref{6607}. */
14757 if ( rcStrict == VINF_SUCCESS
14758 || rcStrict == VINF_IEM_RAISED_XCPT)
14759 TRPMResetTrap(pVCpu);
14760
14761 return rcStrict;
14762#endif
14763}
14764
14765
14766VMM_INT_DECL(int) IEMBreakpointSet(PVM pVM, RTGCPTR GCPtrBp)
14767{
14768 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14769 return VERR_NOT_IMPLEMENTED;
14770}
14771
14772
14773VMM_INT_DECL(int) IEMBreakpointClear(PVM pVM, RTGCPTR GCPtrBp)
14774{
14775 RT_NOREF_PV(pVM); RT_NOREF_PV(GCPtrBp);
14776 return VERR_NOT_IMPLEMENTED;
14777}
14778
14779
14780#if 0 /* The IRET-to-v8086 mode in PATM is very optimistic, so I don't dare do this yet. */
14781/**
14782 * Executes an IRET instruction with default operand size.
14783 *
14784 * This is for PATM.
14785 *
14786 * @returns VBox status code.
14787 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
14788 * @param pCtxCore The register frame.
14789 */
14790VMM_INT_DECL(int) IEMExecInstr_iret(PVMCPUCC pVCpu, PCPUMCTXCORE pCtxCore)
14791{
14792 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
14793
14794 iemCtxCoreToCtx(pCtx, pCtxCore);
14795 iemInitDecoder(pVCpu);
14796 VBOXSTRICTRC rcStrict = iemCImpl_iret(pVCpu, 1, pVCpu->iem.s.enmDefOpSize);
14797 if (rcStrict == VINF_SUCCESS)
14798 iemCtxToCtxCore(pCtxCore, pCtx);
14799 else
14800 LogFlow(("IEMExecInstr_iret: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n",
14801 pVCpu->cpum.GstCtx.cs, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.ss, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.u, VBOXSTRICTRC_VAL(rcStrict)));
14802 return rcStrict;
14803}
14804#endif
14805
14806
14807/**
14808 * Macro used by the IEMExec* method to check the given instruction length.
14809 *
14810 * Will return on failure!
14811 *
14812 * @param a_cbInstr The given instruction length.
14813 * @param a_cbMin The minimum length.
14814 */
14815#define IEMEXEC_ASSERT_INSTR_LEN_RETURN(a_cbInstr, a_cbMin) \
14816 AssertMsgReturn((unsigned)(a_cbInstr) - (unsigned)(a_cbMin) <= (unsigned)15 - (unsigned)(a_cbMin), \
14817 ("cbInstr=%u cbMin=%u\n", (a_cbInstr), (a_cbMin)), VERR_IEM_INVALID_INSTR_LENGTH)
14818
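/*
 * Worked example of the unsigned range trick above: with a_cbMin = 2 the macro
 * accepts 2..15.  cbInstr = 1 yields (unsigned)1 - 2 = UINT_MAX, which is larger
 * than 15 - 2 = 13, so the check fails; cbInstr = 15 yields 13 <= 13 and passes.
 * A single unsigned compare thus covers both the lower and the upper bound.
 */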
14819
14820/**
14821 * Calls iemUninitExec, iemExecStatusCodeFiddling and iemRCRawMaybeReenter.
14822 *
14823 * Only calling iemRCRawMaybeReenter in raw-mode, obviously.
14824 *
14825 * @returns Fiddled strict vbox status code, ready to return to non-IEM caller.
14826 * @param pVCpu The cross context virtual CPU structure of the calling thread.
14827 * @param rcStrict The status code to fiddle.
14828 */
14829DECLINLINE(VBOXSTRICTRC) iemUninitExecAndFiddleStatusAndMaybeReenter(PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
14830{
14831 iemUninitExec(pVCpu);
14832 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
14833}
14834
14835
14836/**
14837 * Interface for HM and EM for executing string I/O OUT (write) instructions.
14838 *
14839 * This API ASSUMES that the caller has already verified that the guest code is
14840 * allowed to access the I/O port. (The I/O port is in the DX register in the
14841 * guest state.)
14842 *
14843 * @returns Strict VBox status code.
14844 * @param pVCpu The cross context virtual CPU structure.
14845 * @param cbValue The size of the I/O port access (1, 2, or 4).
14846 * @param enmAddrMode The addressing mode.
14847 * @param fRepPrefix Indicates whether a repeat prefix is used
14848 * (doesn't matter which for this instruction).
14849 * @param cbInstr The instruction length in bytes.
14850 * @param iEffSeg The effective segment address.
14851 * @param fIoChecked Whether the access to the I/O port has been
14852 * checked or not. It's typically checked in the
14853 * HM scenario.
14854 */
14855VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoWrite(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14856 bool fRepPrefix, uint8_t cbInstr, uint8_t iEffSeg, bool fIoChecked)
14857{
14858 AssertMsgReturn(iEffSeg < X86_SREG_COUNT, ("%#x\n", iEffSeg), VERR_IEM_INVALID_EFF_SEG);
14859 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14860
14861 /*
14862 * State init.
14863 */
14864 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14865
14866 /*
14867 * Switch orgy for getting to the right handler.
14868 */
14869 VBOXSTRICTRC rcStrict;
14870 if (fRepPrefix)
14871 {
14872 switch (enmAddrMode)
14873 {
14874 case IEMMODE_16BIT:
14875 switch (cbValue)
14876 {
14877 case 1: rcStrict = iemCImpl_rep_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14878 case 2: rcStrict = iemCImpl_rep_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14879 case 4: rcStrict = iemCImpl_rep_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14880 default:
14881 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14882 }
14883 break;
14884
14885 case IEMMODE_32BIT:
14886 switch (cbValue)
14887 {
14888 case 1: rcStrict = iemCImpl_rep_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14889 case 2: rcStrict = iemCImpl_rep_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14890 case 4: rcStrict = iemCImpl_rep_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14891 default:
14892 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14893 }
14894 break;
14895
14896 case IEMMODE_64BIT:
14897 switch (cbValue)
14898 {
14899 case 1: rcStrict = iemCImpl_rep_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14900 case 2: rcStrict = iemCImpl_rep_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14901 case 4: rcStrict = iemCImpl_rep_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14902 default:
14903 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14904 }
14905 break;
14906
14907 default:
14908 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14909 }
14910 }
14911 else
14912 {
14913 switch (enmAddrMode)
14914 {
14915 case IEMMODE_16BIT:
14916 switch (cbValue)
14917 {
14918 case 1: rcStrict = iemCImpl_outs_op8_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14919 case 2: rcStrict = iemCImpl_outs_op16_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14920 case 4: rcStrict = iemCImpl_outs_op32_addr16(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14921 default:
14922 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14923 }
14924 break;
14925
14926 case IEMMODE_32BIT:
14927 switch (cbValue)
14928 {
14929 case 1: rcStrict = iemCImpl_outs_op8_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14930 case 2: rcStrict = iemCImpl_outs_op16_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14931 case 4: rcStrict = iemCImpl_outs_op32_addr32(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14932 default:
14933 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14934 }
14935 break;
14936
14937 case IEMMODE_64BIT:
14938 switch (cbValue)
14939 {
14940 case 1: rcStrict = iemCImpl_outs_op8_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14941 case 2: rcStrict = iemCImpl_outs_op16_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14942 case 4: rcStrict = iemCImpl_outs_op32_addr64(pVCpu, cbInstr, iEffSeg, fIoChecked); break;
14943 default:
14944 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
14945 }
14946 break;
14947
14948 default:
14949 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
14950 }
14951 }
14952
14953 if (pVCpu->iem.s.cActiveMappings)
14954 iemMemRollback(pVCpu);
14955
14956 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
14957}
14958
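/*
 * A minimal usage sketch (hedged; cbValue, enmAddrMode, fRep, cbInstr and iEffSeg
 * stand in for values a hypothetical HM exit handler has already decoded from its
 * exit information):
 *      rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
 *                                      iEffSeg, true);
 * Passing fIoChecked = true reflects the HM scenario described above, where the
 * I/O-port access permissions have already been verified.
 */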
14959
14960/**
14961 * Interface for HM and EM for executing string I/O IN (read) instructions.
14962 *
14963 * This API ASSUMES that the caller has already verified that the guest code is
14964 * allowed to access the I/O port. (The I/O port is in the DX register in the
14965 * guest state.)
14966 *
14967 * @returns Strict VBox status code.
14968 * @param pVCpu The cross context virtual CPU structure.
14969 * @param cbValue The size of the I/O port access (1, 2, or 4).
14970 * @param enmAddrMode The addressing mode.
14971 * @param fRepPrefix Indicates whether a repeat prefix is used
14972 * (doesn't matter which for this instruction).
14973 * @param cbInstr The instruction length in bytes.
14974 * @param fIoChecked Whether the access to the I/O port has been
14975 * checked or not. It's typically checked in the
14976 * HM scenario.
14977 */
14978VMM_INT_DECL(VBOXSTRICTRC) IEMExecStringIoRead(PVMCPUCC pVCpu, uint8_t cbValue, IEMMODE enmAddrMode,
14979 bool fRepPrefix, uint8_t cbInstr, bool fIoChecked)
14980{
14981 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
14982
14983 /*
14984 * State init.
14985 */
14986 iemInitExec(pVCpu, false /*fBypassHandlers*/);
14987
14988 /*
14989 * Switch orgy for getting to the right handler.
14990 */
14991 VBOXSTRICTRC rcStrict;
14992 if (fRepPrefix)
14993 {
14994 switch (enmAddrMode)
14995 {
14996 case IEMMODE_16BIT:
14997 switch (cbValue)
14998 {
14999 case 1: rcStrict = iemCImpl_rep_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15000 case 2: rcStrict = iemCImpl_rep_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15001 case 4: rcStrict = iemCImpl_rep_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15002 default:
15003 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15004 }
15005 break;
15006
15007 case IEMMODE_32BIT:
15008 switch (cbValue)
15009 {
15010 case 1: rcStrict = iemCImpl_rep_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15011 case 2: rcStrict = iemCImpl_rep_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15012 case 4: rcStrict = iemCImpl_rep_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15013 default:
15014 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15015 }
15016 break;
15017
15018 case IEMMODE_64BIT:
15019 switch (cbValue)
15020 {
15021 case 1: rcStrict = iemCImpl_rep_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15022 case 2: rcStrict = iemCImpl_rep_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15023 case 4: rcStrict = iemCImpl_rep_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15024 default:
15025 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15026 }
15027 break;
15028
15029 default:
15030 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15031 }
15032 }
15033 else
15034 {
15035 switch (enmAddrMode)
15036 {
15037 case IEMMODE_16BIT:
15038 switch (cbValue)
15039 {
15040 case 1: rcStrict = iemCImpl_ins_op8_addr16(pVCpu, cbInstr, fIoChecked); break;
15041 case 2: rcStrict = iemCImpl_ins_op16_addr16(pVCpu, cbInstr, fIoChecked); break;
15042 case 4: rcStrict = iemCImpl_ins_op32_addr16(pVCpu, cbInstr, fIoChecked); break;
15043 default:
15044 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15045 }
15046 break;
15047
15048 case IEMMODE_32BIT:
15049 switch (cbValue)
15050 {
15051 case 1: rcStrict = iemCImpl_ins_op8_addr32(pVCpu, cbInstr, fIoChecked); break;
15052 case 2: rcStrict = iemCImpl_ins_op16_addr32(pVCpu, cbInstr, fIoChecked); break;
15053 case 4: rcStrict = iemCImpl_ins_op32_addr32(pVCpu, cbInstr, fIoChecked); break;
15054 default:
15055 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15056 }
15057 break;
15058
15059 case IEMMODE_64BIT:
15060 switch (cbValue)
15061 {
15062 case 1: rcStrict = iemCImpl_ins_op8_addr64(pVCpu, cbInstr, fIoChecked); break;
15063 case 2: rcStrict = iemCImpl_ins_op16_addr64(pVCpu, cbInstr, fIoChecked); break;
15064 case 4: rcStrict = iemCImpl_ins_op32_addr64(pVCpu, cbInstr, fIoChecked); break;
15065 default:
15066 AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_IEM_INVALID_OPERAND_SIZE);
15067 }
15068 break;
15069
15070 default:
15071 AssertMsgFailedReturn(("enmAddrMode=%d\n", enmAddrMode), VERR_IEM_INVALID_ADDRESS_MODE);
15072 }
15073 }
15074
15075 Assert(pVCpu->iem.s.cActiveMappings == 0 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM));
15076 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15077}
15078
15079
15080/**
15081 * Interface for rawmode to execute an OUT (write) instruction.
15082 *
15083 * @returns Strict VBox status code.
15084 * @param pVCpu The cross context virtual CPU structure.
15085 * @param cbInstr The instruction length in bytes.
15086 * @param u16Port The port to write to.
15087 * @param fImm Whether the port is specified using an immediate operand or
15088 * using the implicit DX register.
15089 * @param cbReg The register size.
15090 *
15091 * @remarks In ring-0 not all of the state needs to be synced in.
15092 */
15093VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedOut(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15094{
15095 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15096 Assert(cbReg <= 4 && cbReg != 3);
15097
15098 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15099 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_out, u16Port, fImm, cbReg);
15100 Assert(!pVCpu->iem.s.cActiveMappings);
15101 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15102}
15103
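/*
 * A minimal usage sketch (hedged; u16Port and cbInstr stand in for caller-decoded
 * values): for 'out dx, al' the port comes from DX and fImm is false,
 *      rcStrict = IEMExecDecodedOut(pVCpu, cbInstr, u16Port, false, 1);
 * whereas the immediate form 'out 0x80, al' would pass fImm = true instead.
 */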
15104
15105/**
15106 * Interface for rawmode to execute an IN (read) instruction.
15107 *
15108 * @returns Strict VBox status code.
15109 * @param pVCpu The cross context virtual CPU structure.
15110 * @param cbInstr The instruction length in bytes.
15111 * @param u16Port The port to read.
15112 * @param fImm Whether the port is specified using an immediate operand or
15113 * using the implicit DX.
15114 * @param cbReg The register size.
15115 */
15116VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedIn(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t u16Port, bool fImm, uint8_t cbReg)
15117{
15118 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15119 Assert(cbReg <= 4 && cbReg != 3);
15120
15121 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15122 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_in, u16Port, fImm, cbReg);
15123 Assert(!pVCpu->iem.s.cActiveMappings);
15124 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15125}
15126
15127
15128/**
15129 * Interface for HM and EM to write to a CRx register.
15130 *
15131 * @returns Strict VBox status code.
15132 * @param pVCpu The cross context virtual CPU structure.
15133 * @param cbInstr The instruction length in bytes.
15134 * @param iCrReg The control register number (destination).
15135 * @param iGReg The general purpose register number (source).
15136 *
15137 * @remarks In ring-0 not all of the state needs to be synced in.
15138 */
15139VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxWrite(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iCrReg, uint8_t iGReg)
15140{
15141 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15142 Assert(iCrReg < 16);
15143 Assert(iGReg < 16);
15144
15145 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15146 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Cd_Rd, iCrReg, iGReg);
15147 Assert(!pVCpu->iem.s.cActiveMappings);
15148 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15149}
15150
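/*
 * A minimal usage sketch (hedged): a 'mov cr3, rax' intercept would be forwarded as
 *      rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, 3, 0);
 * where iCrReg = 3 selects CR3 and iGReg = 0 is the encoding of RAX/EAX.
 */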
15151
15152/**
15153 * Interface for HM and EM to read from a CRx register.
15154 *
15155 * @returns Strict VBox status code.
15156 * @param pVCpu The cross context virtual CPU structure.
15157 * @param cbInstr The instruction length in bytes.
15158 * @param iGReg The general purpose register number (destination).
15159 * @param iCrReg The control register number (source).
15160 *
15161 * @remarks In ring-0 not all of the state needs to be synced in.
15162 */
15163VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMovCRxRead(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iGReg, uint8_t iCrReg)
15164{
15165 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15166 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4
15167 | CPUMCTX_EXTRN_APIC_TPR);
15168 Assert(iCrReg < 16);
15169 Assert(iGReg < 16);
15170
15171 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15172 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_mov_Rd_Cd, iGReg, iCrReg);
15173 Assert(!pVCpu->iem.s.cActiveMappings);
15174 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15175}
15176
15177
15178/**
15179 * Interface for HM and EM to clear the CR0[TS] bit.
15180 *
15181 * @returns Strict VBox status code.
15182 * @param pVCpu The cross context virtual CPU structure.
15183 * @param cbInstr The instruction length in bytes.
15184 *
15185 * @remarks In ring-0 not all of the state needs to be synced in.
15186 */
15187VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClts(PVMCPUCC pVCpu, uint8_t cbInstr)
15188{
15189 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15190
15191 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15192 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clts);
15193 Assert(!pVCpu->iem.s.cActiveMappings);
15194 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15195}
15196
15197
15198/**
15199 * Interface for HM and EM to emulate the LMSW instruction (loads CR0).
15200 *
15201 * @returns Strict VBox status code.
15202 * @param pVCpu The cross context virtual CPU structure.
15203 * @param cbInstr The instruction length in bytes.
15204 * @param uValue The value to load into CR0.
15205 * @param GCPtrEffDst The guest-linear address if the LMSW instruction has a
15206 * memory operand. Otherwise pass NIL_RTGCPTR.
15207 *
15208 * @remarks In ring-0 not all of the state needs to be synced in.
15209 */
15210VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedLmsw(PVMCPUCC pVCpu, uint8_t cbInstr, uint16_t uValue, RTGCPTR GCPtrEffDst)
15211{
15212 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15213
15214 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15215 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_2(iemCImpl_lmsw, uValue, GCPtrEffDst);
15216 Assert(!pVCpu->iem.s.cActiveMappings);
15217 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15218}
15219
15220
15221/**
15222 * Interface for HM and EM to emulate the XSETBV instruction (loads XCRx).
15223 *
15224 * Takes input values in ecx and edx:eax of the CPU context of the calling EMT.
15225 *
15226 * @returns Strict VBox status code.
15227 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15228 * @param cbInstr The instruction length in bytes.
15229 * @remarks In ring-0 not all of the state needs to be synced in.
15230 * @thread EMT(pVCpu)
15231 */
15232VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedXsetbv(PVMCPUCC pVCpu, uint8_t cbInstr)
15233{
15234 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15235
15236 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15237 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_xsetbv);
15238 Assert(!pVCpu->iem.s.cActiveMappings);
15239 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15240}
15241
15242
15243/**
15244 * Interface for HM and EM to emulate the WBINVD instruction.
15245 *
15246 * @returns Strict VBox status code.
15247 * @param pVCpu The cross context virtual CPU structure.
15248 * @param cbInstr The instruction length in bytes.
15249 *
15250 * @remarks In ring-0 not all of the state needs to be synced in.
15251 */
15252VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWbinvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15253{
15254 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15255
15256 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15257 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wbinvd);
15258 Assert(!pVCpu->iem.s.cActiveMappings);
15259 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15260}
15261
15262
15263/**
15264 * Interface for HM and EM to emulate the INVD instruction.
15265 *
15266 * @returns Strict VBox status code.
15267 * @param pVCpu The cross context virtual CPU structure.
15268 * @param cbInstr The instruction length in bytes.
15269 *
15270 * @remarks In ring-0 not all of the state needs to be synced in.
15271 */
15272VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvd(PVMCPUCC pVCpu, uint8_t cbInstr)
15273{
15274 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15275
15276 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15277 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invd);
15278 Assert(!pVCpu->iem.s.cActiveMappings);
15279 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15280}
15281
15282
15283/**
15284 * Interface for HM and EM to emulate the INVLPG instruction.
15285 *
15286 * @returns Strict VBox status code.
15287 * @retval VINF_PGM_SYNC_CR3
15288 *
15289 * @param pVCpu The cross context virtual CPU structure.
15290 * @param cbInstr The instruction length in bytes.
15291 * @param GCPtrPage The effective address of the page to invalidate.
15292 *
15293 * @remarks In ring-0 not all of the state needs to be synced in.
15294 */
15295VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpg(PVMCPUCC pVCpu, uint8_t cbInstr, RTGCPTR GCPtrPage)
15296{
15297 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15298
15299 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15300 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_invlpg, GCPtrPage);
15301 Assert(!pVCpu->iem.s.cActiveMappings);
15302 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15303}
15304
15305
15306/**
15307 * Interface for HM and EM to emulate the INVPCID instruction.
15308 *
15309 * @returns Strict VBox status code.
15310 * @retval VINF_PGM_SYNC_CR3
15311 *
15312 * @param pVCpu The cross context virtual CPU structure.
15313 * @param cbInstr The instruction length in bytes.
15314 * @param iEffSeg The effective segment register.
15315 * @param GCPtrDesc The effective address of the INVPCID descriptor.
15316 * @param uType The invalidation type.
15317 *
15318 * @remarks In ring-0 not all of the state needs to be synced in.
15319 */
15320VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvpcid(PVMCPUCC pVCpu, uint8_t cbInstr, uint8_t iEffSeg, RTGCPTR GCPtrDesc,
15321 uint64_t uType)
15322{
15323 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 4);
15324
15325 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15326 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_3(iemCImpl_invpcid, iEffSeg, GCPtrDesc, uType);
15327 Assert(!pVCpu->iem.s.cActiveMappings);
15328 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15329}
15330
15331
15332/**
15333 * Interface for HM and EM to emulate the CPUID instruction.
15334 *
15335 * @returns Strict VBox status code.
15336 *
15337 * @param pVCpu The cross context virtual CPU structure.
15338 * @param cbInstr The instruction length in bytes.
15339 *
15340 * @remarks Not all of the state needs to be synced in; the usual plus RAX and RCX.
15341 */
15342VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedCpuid(PVMCPUCC pVCpu, uint8_t cbInstr)
15343{
15344 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15345 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RCX);
15346
15347 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15348 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_cpuid);
15349 Assert(!pVCpu->iem.s.cActiveMappings);
15350 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15351}
15352
15353
15354/**
15355 * Interface for HM and EM to emulate the RDPMC instruction.
15356 *
15357 * @returns Strict VBox status code.
15358 *
15359 * @param pVCpu The cross context virtual CPU structure.
15360 * @param cbInstr The instruction length in bytes.
15361 *
15362 * @remarks Not all of the state needs to be synced in.
15363 */
15364VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdpmc(PVMCPUCC pVCpu, uint8_t cbInstr)
15365{
15366 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15367 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15368
15369 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15370 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdpmc);
15371 Assert(!pVCpu->iem.s.cActiveMappings);
15372 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15373}
15374
15375
15376/**
15377 * Interface for HM and EM to emulate the RDTSC instruction.
15378 *
15379 * @returns Strict VBox status code.
15380 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15381 *
15382 * @param pVCpu The cross context virtual CPU structure.
15383 * @param cbInstr The instruction length in bytes.
15384 *
15385 * @remarks Not all of the state needs to be synced in.
15386 */
15387VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtsc(PVMCPUCC pVCpu, uint8_t cbInstr)
15388{
15389 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15390 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4);
15391
15392 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15393 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtsc);
15394 Assert(!pVCpu->iem.s.cActiveMappings);
15395 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15396}
15397
15398
15399/**
15400 * Interface for HM and EM to emulate the RDTSCP instruction.
15401 *
15402 * @returns Strict VBox status code.
15403 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15404 *
15405 * @param pVCpu The cross context virtual CPU structure.
15406 * @param cbInstr The instruction length in bytes.
15407 *
15408 * @remarks Not all of the state needs to be synced in. Recommended
15409 * to include CPUMCTX_EXTRN_TSC_AUX to avoid an extra fetch call.
15410 */
15411VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdtscp(PVMCPUCC pVCpu, uint8_t cbInstr)
15412{
15413 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15414 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_TSC_AUX);
15415
15416 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15417 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdtscp);
15418 Assert(!pVCpu->iem.s.cActiveMappings);
15419 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15420}
15421
15422
15423/**
15424 * Interface for HM and EM to emulate the RDMSR instruction.
15425 *
15426 * @returns Strict VBox status code.
15427 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15428 *
15429 * @param pVCpu The cross context virtual CPU structure.
15430 * @param cbInstr The instruction length in bytes.
15431 *
15432 * @remarks Not all of the state needs to be synced in. Requires RCX and
15433 * (currently) all MSRs.
15434 */
15435VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedRdmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15436{
15437 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15438 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_ALL_MSRS);
15439
15440 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15441 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_rdmsr);
15442 Assert(!pVCpu->iem.s.cActiveMappings);
15443 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15444}
15445
15446
15447/**
15448 * Interface for HM and EM to emulate the WRMSR instruction.
15449 *
15450 * @returns Strict VBox status code.
15451 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15452 *
15453 * @param pVCpu The cross context virtual CPU structure.
15454 * @param cbInstr The instruction length in bytes.
15455 *
15456 * @remarks Not all of the state needs to be synced in. Requires RCX, RAX, RDX,
15457 * and (currently) all MSRs.
15458 */
15459VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedWrmsr(PVMCPUCC pVCpu, uint8_t cbInstr)
15460{
15461 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 2);
15462 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK
15463 | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX | CPUMCTX_EXTRN_RDX | CPUMCTX_EXTRN_ALL_MSRS);
15464
15465 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15466 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_wrmsr);
15467 Assert(!pVCpu->iem.s.cActiveMappings);
15468 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15469}
15470
15471
15472/**
15473 * Interface for HM and EM to emulate the MONITOR instruction.
15474 *
15475 * @returns Strict VBox status code.
15476 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15477 *
15478 * @param pVCpu The cross context virtual CPU structure.
15479 * @param cbInstr The instruction length in bytes.
15480 *
15481 * @remarks Not all of the state needs to be synced in.
15482 * @remarks ASSUMES the default segment of DS and no segment override prefixes
15483 * are used.
15484 */
15485VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMonitor(PVMCPUCC pVCpu, uint8_t cbInstr)
15486{
15487 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15488 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_DS);
15489
15490 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15491 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_1(iemCImpl_monitor, X86_SREG_DS);
15492 Assert(!pVCpu->iem.s.cActiveMappings);
15493 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15494}
15495
15496
15497/**
15498 * Interface for HM and EM to emulate the MWAIT instruction.
15499 *
15500 * @returns Strict VBox status code.
15501 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15502 *
15503 * @param pVCpu The cross context virtual CPU structure.
15504 * @param cbInstr The instruction length in bytes.
15505 *
15506 * @remarks Not all of the state needs to be synced in.
15507 */
15508VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedMwait(PVMCPUCC pVCpu, uint8_t cbInstr)
15509{
15510 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15511 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_RCX | CPUMCTX_EXTRN_RAX);
15512
15513 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15514 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_mwait);
15515 Assert(!pVCpu->iem.s.cActiveMappings);
15516 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15517}
15518
15519
15520/**
15521 * Interface for HM and EM to emulate the HLT instruction.
15522 *
15523 * @returns Strict VBox status code.
15524 * @retval VINF_IEM_RAISED_XCPT (VINF_EM_RESCHEDULE) if exception is raised.
15525 *
15526 * @param pVCpu The cross context virtual CPU structure.
15527 * @param cbInstr The instruction length in bytes.
15528 *
15529 * @remarks Not all of the state needs to be synced in.
15530 */
15531VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedHlt(PVMCPUCC pVCpu, uint8_t cbInstr)
15532{
15533 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 1);
15534
15535 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15536 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_hlt);
15537 Assert(!pVCpu->iem.s.cActiveMappings);
15538 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15539}
15540
15541
15542/**
15543 * Checks if IEM is in the process of delivering an event (interrupt or
15544 * exception).
15545 *
15546 * @returns true if we're in the process of raising an interrupt or exception,
15547 * false otherwise.
15548 * @param pVCpu The cross context virtual CPU structure.
15549 * @param puVector Where to store the vector associated with the
15550 * currently delivered event, optional.
15551 * @param pfFlags Where to store the event delivery flags (see
15552 * IEM_XCPT_FLAGS_XXX), optional.
15553 * @param puErr Where to store the error code associated with the
15554 * event, optional.
15555 * @param puCr2 Where to store the CR2 associated with the event,
15556 * optional.
15557 * @remarks The caller should check the flags to determine if the error code and
15558 * CR2 are valid for the event.
15559 */
15560VMM_INT_DECL(bool) IEMGetCurrentXcpt(PVMCPUCC pVCpu, uint8_t *puVector, uint32_t *pfFlags, uint32_t *puErr, uint64_t *puCr2)
15561{
15562 bool const fRaisingXcpt = pVCpu->iem.s.cXcptRecursions > 0;
15563 if (fRaisingXcpt)
15564 {
15565 if (puVector)
15566 *puVector = pVCpu->iem.s.uCurXcpt;
15567 if (pfFlags)
15568 *pfFlags = pVCpu->iem.s.fCurXcpt;
15569 if (puErr)
15570 *puErr = pVCpu->iem.s.uCurXcptErr;
15571 if (puCr2)
15572 *puCr2 = pVCpu->iem.s.uCurXcptCr2;
15573 }
15574 return fRaisingXcpt;
15575}
15576
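/*
 * A minimal usage sketch (hedged): a caller interested in an in-flight page fault
 * could check the delivery flags before trusting the error code and CR2 values:
 *      uint8_t uVector; uint32_t fFlags, uErr; uint64_t uCr2;
 *      if (   IEMGetCurrentXcpt(pVCpu, &uVector, &fFlags, &uErr, &uCr2)
 *          && uVector == X86_XCPT_PF
 *          && (fFlags & IEM_XCPT_FLAGS_CR2))
 *          Log(("PF in flight: err=%#x cr2=%RX64\n", uErr, uCr2));
 */
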
15577#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
15578
15579/**
15580 * Interface for HM and EM to emulate the CLGI instruction.
15581 *
15582 * @returns Strict VBox status code.
15583 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15584 * @param cbInstr The instruction length in bytes.
15585 * @thread EMT(pVCpu)
15586 */
15587VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedClgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15588{
15589 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15590
15591 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15592 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_clgi);
15593 Assert(!pVCpu->iem.s.cActiveMappings);
15594 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15595}
15596
15597
15598/**
15599 * Interface for HM and EM to emulate the STGI instruction.
15600 *
15601 * @returns Strict VBox status code.
15602 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15603 * @param cbInstr The instruction length in bytes.
15604 * @thread EMT(pVCpu)
15605 */
15606VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedStgi(PVMCPUCC pVCpu, uint8_t cbInstr)
15607{
15608 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15609
15610 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15611 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_stgi);
15612 Assert(!pVCpu->iem.s.cActiveMappings);
15613 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15614}
15615
15616
15617/**
15618 * Interface for HM and EM to emulate the VMLOAD instruction.
15619 *
15620 * @returns Strict VBox status code.
15621 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15622 * @param cbInstr The instruction length in bytes.
15623 * @thread EMT(pVCpu)
15624 */
15625VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmload(PVMCPUCC pVCpu, uint8_t cbInstr)
15626{
15627 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15628
15629 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15630 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmload);
15631 Assert(!pVCpu->iem.s.cActiveMappings);
15632 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15633}
15634
15635
15636/**
15637 * Interface for HM and EM to emulate the VMSAVE instruction.
15638 *
15639 * @returns Strict VBox status code.
15640 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15641 * @param cbInstr The instruction length in bytes.
15642 * @thread EMT(pVCpu)
15643 */
15644VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmsave(PVMCPUCC pVCpu, uint8_t cbInstr)
15645{
15646 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15647
15648 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15649 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmsave);
15650 Assert(!pVCpu->iem.s.cActiveMappings);
15651 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15652}
15653
15654
15655/**
15656 * Interface for HM and EM to emulate the INVLPGA instruction.
15657 *
15658 * @returns Strict VBox status code.
15659 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15660 * @param cbInstr The instruction length in bytes.
15661 * @thread EMT(pVCpu)
15662 */
15663VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvlpga(PVMCPUCC pVCpu, uint8_t cbInstr)
15664{
15665 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15666
15667 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15668 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_invlpga);
15669 Assert(!pVCpu->iem.s.cActiveMappings);
15670 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15671}
15672
15673
15674/**
15675 * Interface for HM and EM to emulate the VMRUN instruction.
15676 *
15677 * @returns Strict VBox status code.
15678 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15679 * @param cbInstr The instruction length in bytes.
15680 * @thread EMT(pVCpu)
15681 */
15682VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmrun(PVMCPUCC pVCpu, uint8_t cbInstr)
15683{
15684 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
15685 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK);
15686
15687 iemInitExec(pVCpu, false /*fBypassHandlers*/);
15688 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmrun);
15689 Assert(!pVCpu->iem.s.cActiveMappings);
15690 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
15691}
15692
15693
15694/**
15695 * Interface for HM and EM to emulate \#VMEXIT.
15696 *
15697 * @returns Strict VBox status code.
15698 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15699 * @param uExitCode The exit code.
15700 * @param uExitInfo1 The exit info. 1 field.
15701 * @param uExitInfo2 The exit info. 2 field.
15702 * @thread EMT(pVCpu)
15703 */
15704VMM_INT_DECL(VBOXSTRICTRC) IEMExecSvmVmexit(PVMCPUCC pVCpu, uint64_t uExitCode, uint64_t uExitInfo1, uint64_t uExitInfo2)
15705{
15706 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
15707 VBOXSTRICTRC rcStrict = iemSvmVmexit(pVCpu, uExitCode, uExitInfo1, uExitInfo2);
15708 if (pVCpu->iem.s.cActiveMappings)
15709 iemMemRollback(pVCpu);
15710 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15711}
15712
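/*
 * A minimal usage sketch (hedged; SVM_EXIT_INTR is assumed to come from the SVM
 * headers): signalling a physical-interrupt #VMEXIT, which carries no exit info,
 * would look roughly like
 *      rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
 */
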
15713#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
15714
15715#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
15716
15717/**
15718 * Interface for HM and EM to read a VMCS field from the nested-guest VMCS.
15719 *
15720 * It is ASSUMED the caller knows what they're doing. No VMREAD instruction checks
15721 * are performed. Bounds checks are done in strict builds only.
15722 *
15723 * @param pVmcs Pointer to the virtual VMCS.
15724 * @param u64VmcsField The VMCS field.
15725 * @param pu64Dst Where to store the VMCS value.
15726 *
15727 * @remarks May be called with interrupts disabled.
15728 * @todo This should probably be moved to CPUM someday.
15729 */
15730VMM_INT_DECL(void) IEMReadVmxVmcsField(PCVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t *pu64Dst)
15731{
15732 AssertPtr(pVmcs);
15733 AssertPtr(pu64Dst);
15734 iemVmxVmreadNoCheck(pVmcs, pu64Dst, u64VmcsField);
15735}
15736
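/*
 * A minimal usage sketch (hedged; VMX_VMCS_RO_EXIT_REASON stands in for whatever
 * field encoding constant the caller uses):
 *      uint64_t u64ExitReason;
 *      IEMReadVmxVmcsField(pVmcs, VMX_VMCS_RO_EXIT_REASON, &u64ExitReason);
 * Since no checks are performed, the caller must ensure the encoding is valid for
 * the current VMCS (see the remarks above).
 */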
15737
15738/**
15739 * Interface for HM and EM to write a VMCS field in the nested-guest VMCS.
15740 *
15741 * It is ASSUMED the caller knows what they're doing. No VMWRITE instruction checks
15742 * are performed. Bounds checks are done in strict builds only.
15743 *
15744 * @param pVmcs Pointer to the virtual VMCS.
15745 * @param u64VmcsField The VMCS field.
15746 * @param u64Val The value to write.
15747 *
15748 * @remarks May be called with interrupts disabled.
15749 * @todo This should probably be moved to CPUM someday.
15750 */
15751VMM_INT_DECL(void) IEMWriteVmxVmcsField(PVMXVVMCS pVmcs, uint64_t u64VmcsField, uint64_t u64Val)
15752{
15753 AssertPtr(pVmcs);
15754 iemVmxVmwriteNoCheck(pVmcs, u64Val, u64VmcsField);
15755}
15756
15757
15758/**
15759 * Interface for HM and EM to virtualize x2APIC MSR accesses.
15760 *
15761 * @returns Strict VBox status code.
15762 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the MSR access was virtualized.
15763 * @retval VINF_VMX_INTERCEPT_NOT_ACTIVE if the MSR access must be handled by
15764 * the x2APIC device.
15765 * @retval VERR_OUT_OF_RANGE if the caller must raise \#GP(0).
15766 *
15767 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15768 * @param idMsr The MSR being read.
15769 * @param pu64Value Pointer to the value being written or where to store the
15770 * value being read.
15771 * @param fWrite Whether this is an MSR write or read access.
15772 * @thread EMT(pVCpu)
15773 */
15774VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVirtApicAccessMsr(PVMCPUCC pVCpu, uint32_t idMsr, uint64_t *pu64Value, bool fWrite)
15775{
15776 Assert(pu64Value);
15777
15778 VBOXSTRICTRC rcStrict;
15779 if (fWrite)
15780 rcStrict = iemVmxVirtApicAccessMsrWrite(pVCpu, idMsr, *pu64Value);
15781 else
15782 rcStrict = iemVmxVirtApicAccessMsrRead(pVCpu, idMsr, pu64Value);
15783 Assert(!pVCpu->iem.s.cActiveMappings);
15784 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15785
15786}
15787
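/*
 * A minimal usage sketch (hedged; idMsr and u64Value stand in for caller-provided
 * data) for forwarding an intercepted x2APIC MSR write:
 *      VBOXSTRICTRC rcStrict2 = IEMExecVmxVirtApicAccessMsr(pVCpu, idMsr, &u64Value, true);
 *      if (rcStrict2 == VINF_VMX_INTERCEPT_NOT_ACTIVE)
 *          ... hand the access to the x2APIC device ...
 * A read works the same way with fWrite = false, in which case the value is
 * returned in *pu64Value.
 */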
15788
15789/**
15790 * Interface for HM and EM to virtualize memory-mapped APIC accesses.
15791 *
15792 * @returns Strict VBox status code.
15793 * @retval VINF_VMX_MODIFIES_BEHAVIOR if the memory access was virtualized.
15794 * @retval VINF_VMX_VMEXIT if the access causes a VM-exit.
15795 *
15796 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15797 * @param pExitInfo Pointer to the VM-exit information.
15798 * @param pExitEventInfo Pointer to the VM-exit event information.
15799 * @thread EMT(pVCpu)
15800 */
15801VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicAccess(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15802{
15803 Assert(pExitInfo);
15804 Assert(pExitEventInfo);
15805 VBOXSTRICTRC rcStrict = iemVmxVmexitApicAccessWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15806 Assert(!pVCpu->iem.s.cActiveMappings);
15807 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15808
15809}
15810
15811
15812/**
15813 * Interface for HM and EM to perform an APIC-write emulation which may cause a
15814 * VM-exit.
15815 *
15816 * @returns Strict VBox status code.
15817 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15818 * @thread EMT(pVCpu)
15819 */
15820VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitApicWrite(PVMCPUCC pVCpu)
15821{
15822 VBOXSTRICTRC rcStrict = iemVmxApicWriteEmulation(pVCpu);
15823 Assert(!pVCpu->iem.s.cActiveMappings);
15824 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15825}
15826
15827
15828/**
15829 * Interface for HM and EM to emulate VM-exit due to expiry of the preemption timer.
15830 *
15831 * @returns Strict VBox status code.
15832 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15833 * @thread EMT(pVCpu)
15834 */
15835VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitPreemptTimer(PVMCPUCC pVCpu)
15836{
15837 VBOXSTRICTRC rcStrict = iemVmxVmexitPreemptTimer(pVCpu);
15838 Assert(!pVCpu->iem.s.cActiveMappings);
15839 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15840}
15841
15842
15843/**
15844 * Interface for HM and EM to emulate VM-exit due to external interrupts.
15845 *
15846 * @returns Strict VBox status code.
15847 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15848 * @param uVector The external interrupt vector (pass 0 if the external
15849 * interrupt is still pending).
15850 * @param fIntPending Whether the external interrupt is pending or
15851 * acknowledged in the interrupt controller.
15852 * @thread EMT(pVCpu)
15853 */
15854VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitExtInt(PVMCPUCC pVCpu, uint8_t uVector, bool fIntPending)
15855{
15856 VBOXSTRICTRC rcStrict = iemVmxVmexitExtInt(pVCpu, uVector, fIntPending);
15857 Assert(!pVCpu->iem.s.cActiveMappings);
15858 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15859}
15860
15861
15862/**
15863 * Interface for HM and EM to emulate VM-exit due to exceptions.
15864 *
15865 * Exceptions include NMIs, software exceptions (those generated by INT3 or
15866 * INTO) and privileged software exceptions (those generated by INT1/ICEBP).
15867 *
15868 * @returns Strict VBox status code.
15869 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15870 * @param pExitInfo Pointer to the VM-exit information.
15871 * @param pExitEventInfo Pointer to the VM-exit event information.
15872 * @thread EMT(pVCpu)
15873 */
15874VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcpt(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
15875{
15876 Assert(pExitInfo);
15877 Assert(pExitEventInfo);
15878 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, pExitInfo, pExitEventInfo);
15879 Assert(!pVCpu->iem.s.cActiveMappings);
15880 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15881}
15882
15883
15884/**
15885 * Interface for HM and EM to emulate VM-exit due to NMIs.
15886 *
15887 * @returns Strict VBox status code.
15888 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15889 * @thread EMT(pVCpu)
15890 */
15891VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitXcptNmi(PVMCPUCC pVCpu)
15892{
15893 VMXVEXITINFO ExitInfo;
15894 RT_ZERO(ExitInfo);
15895 ExitInfo.uReason = VMX_EXIT_XCPT_OR_NMI;
15896
15897 VMXVEXITEVENTINFO ExitEventInfo;
15898 RT_ZERO(ExitEventInfo);
15899 ExitEventInfo.uExitIntInfo = RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VALID, 1)
15900 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_TYPE, VMX_EXIT_INT_INFO_TYPE_NMI)
15901 | RT_BF_MAKE(VMX_BF_EXIT_INT_INFO_VECTOR, X86_XCPT_NMI);
15902
15903 VBOXSTRICTRC rcStrict = iemVmxVmexitEventWithInfo(pVCpu, &ExitInfo, &ExitEventInfo);
15904 Assert(!pVCpu->iem.s.cActiveMappings);
15905 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15906}
15907
15908
15909/**
15910 * Interface for HM and EM to emulate VM-exit due to a triple-fault.
15911 *
15912 * @returns Strict VBox status code.
15913 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15914 * @thread EMT(pVCpu)
15915 */
15916VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTripleFault(PVMCPUCC pVCpu)
15917{
15918 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_TRIPLE_FAULT, 0 /* u64ExitQual */);
15919 Assert(!pVCpu->iem.s.cActiveMappings);
15920 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15921}
15922
15923
15924/**
15925 * Interface for HM and EM to emulate VM-exit due to startup-IPI (SIPI).
15926 *
15927 * @returns Strict VBox status code.
15928 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15929 * @param uVector The SIPI vector.
15930 * @thread EMT(pVCpu)
15931 */
15932VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitStartupIpi(PVMCPUCC pVCpu, uint8_t uVector)
15933{
15934 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, VMX_EXIT_SIPI, uVector);
15935 Assert(!pVCpu->iem.s.cActiveMappings);
15936 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15937}
15938
15939
15940/**
15941 * Interface for HM and EM to emulate a VM-exit.
15942 *
15943 * If a specialized version of a VM-exit handler exists, that must be used instead.
15944 *
15945 * @returns Strict VBox status code.
15946 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15947 * @param uExitReason The VM-exit reason.
15948 * @param u64ExitQual The Exit qualification.
15949 * @thread EMT(pVCpu)
15950 */
15951VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexit(PVMCPUCC pVCpu, uint32_t uExitReason, uint64_t u64ExitQual)
15952{
15953 VBOXSTRICTRC rcStrict = iemVmxVmexit(pVCpu, uExitReason, u64ExitQual);
15954 Assert(!pVCpu->iem.s.cActiveMappings);
15955 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15956}
15957
15958
15959/**
15960 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15961 *
15962 * This is meant to be used for those instructions for which VMX provides additional
15963 * decoding information beyond just the instruction length!
15964 *
15965 * @returns Strict VBox status code.
15966 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15967 * @param pExitInfo Pointer to the VM-exit information.
15968 * @thread EMT(pVCpu)
15969 */
15970VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstrWithInfo(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
15971{
15972 VBOXSTRICTRC rcStrict = iemVmxVmexitInstrWithInfo(pVCpu, pExitInfo);
15973 Assert(!pVCpu->iem.s.cActiveMappings);
15974 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15975}
15976
15977
15978/**
15979 * Interface for HM and EM to emulate a VM-exit due to an instruction.
15980 *
15981 * This is meant to be used for those instructions for which VMX provides only the
15982 * instruction length.
15983 *
15984 * @returns Strict VBox status code.
15985 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
15986 * @param uExitReason The VM-exit reason.
15987 * @param cbInstr The instruction length in bytes.
15988 * @thread EMT(pVCpu)
15989 */
15990VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitInstr(PVMCPUCC pVCpu, uint32_t uExitReason, uint8_t cbInstr)
15991{
15992 VBOXSTRICTRC rcStrict = iemVmxVmexitInstr(pVCpu, uExitReason, cbInstr);
15993 Assert(!pVCpu->iem.s.cActiveMappings);
15994 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
15995}
15996
15997
15998/**
15999 * Interface for HM and EM to emulate a trap-like VM-exit (MTF, APIC-write,
16000 * Virtualized-EOI, TPR-below threshold).
16001 *
16002 * @returns Strict VBox status code.
16003 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16004 * @param pExitInfo Pointer to the VM-exit information.
16005 * @thread EMT(pVCpu)
16006 */
16007VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTrapLike(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16008{
16009 Assert(pExitInfo);
16010 VBOXSTRICTRC rcStrict = iemVmxVmexitTrapLikeWithInfo(pVCpu, pExitInfo);
16011 Assert(!pVCpu->iem.s.cActiveMappings);
16012 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16013}
16014
16015
16016/**
16017 * Interface for HM and EM to emulate a VM-exit due to a task switch.
16018 *
16019 * @returns Strict VBox status code.
16020 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16021 * @param pExitInfo Pointer to the VM-exit information.
16022 * @param pExitEventInfo Pointer to the VM-exit event information.
16023 * @thread EMT(pVCpu)
16024 */
16025VMM_INT_DECL(VBOXSTRICTRC) IEMExecVmxVmexitTaskSwitch(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo, PCVMXVEXITEVENTINFO pExitEventInfo)
16026{
16027 Assert(pExitInfo);
16028 Assert(pExitEventInfo);
16029 Assert(pExitInfo->uReason == VMX_EXIT_TASK_SWITCH);
16030 VBOXSTRICTRC rcStrict = iemVmxVmexitTaskSwitchWithInfo(pVCpu, pExitInfo, pExitEventInfo);
16031 Assert(!pVCpu->iem.s.cActiveMappings);
16032 return iemExecStatusCodeFiddling(pVCpu, rcStrict);
16033}
16034
16035
16036/**
16037 * Interface for HM and EM to emulate the VMREAD instruction.
16038 *
16039 * @returns Strict VBox status code.
16040 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16041 * @param pExitInfo Pointer to the VM-exit information.
16042 * @thread EMT(pVCpu)
16043 */
16044VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmread(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16045{
16046 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16047 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16048 Assert(pExitInfo);
16049
16050 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16051
16052 VBOXSTRICTRC rcStrict;
16053 uint8_t const cbInstr = pExitInfo->cbInstr;
16054 bool const fIs64BitMode = RT_BOOL(pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT);
16055 uint64_t const u64FieldEnc = fIs64BitMode
16056 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16057 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16058 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16059 {
16060 if (fIs64BitMode)
16061 {
16062 uint64_t *pu64Dst = iemGRegRefU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16063 rcStrict = iemVmxVmreadReg64(pVCpu, cbInstr, pu64Dst, u64FieldEnc, pExitInfo);
16064 }
16065 else
16066 {
16067 uint32_t *pu32Dst = iemGRegRefU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16068 rcStrict = iemVmxVmreadReg32(pVCpu, cbInstr, pu32Dst, u64FieldEnc, pExitInfo);
16069 }
16070 }
16071 else
16072 {
16073 RTGCPTR const GCPtrDst = pExitInfo->GCPtrEffAddr;
16074 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16075 rcStrict = iemVmxVmreadMem(pVCpu, cbInstr, iEffSeg, GCPtrDst, u64FieldEnc, pExitInfo);
16076 }
16077 Assert(!pVCpu->iem.s.cActiveMappings);
16078 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16079}
16080
16081
16082/**
16083 * Interface for HM and EM to emulate the VMWRITE instruction.
16084 *
16085 * @returns Strict VBox status code.
16086 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16087 * @param pExitInfo Pointer to the VM-exit information.
16088 * @thread EMT(pVCpu)
16089 */
16090VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmwrite(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16091{
16092 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16093 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16094 Assert(pExitInfo);
16095
16096 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16097
16098 uint64_t u64Val;
16099 uint8_t iEffSeg;
16100 if (pExitInfo->InstrInfo.VmreadVmwrite.fIsRegOperand)
16101 {
16102 u64Val = iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg1);
16103 iEffSeg = UINT8_MAX;
16104 }
16105 else
16106 {
16107 u64Val = pExitInfo->GCPtrEffAddr;
16108 iEffSeg = pExitInfo->InstrInfo.VmreadVmwrite.iSegReg;
16109 }
16110 uint8_t const cbInstr = pExitInfo->cbInstr;
16111 uint64_t const u64FieldEnc = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16112 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2)
16113 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.VmreadVmwrite.iReg2);
16114 VBOXSTRICTRC rcStrict = iemVmxVmwrite(pVCpu, cbInstr, iEffSeg, u64Val, u64FieldEnc, pExitInfo);
16115 Assert(!pVCpu->iem.s.cActiveMappings);
16116 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16117}
16118
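/*
 * Illustrative sketch only (not part of the original source): the memory-operand
 * counterpart for the interface above, i.e. a VMWRITE that reads its value from
 * guest memory.  The segment, register index, address and instruction length are
 * assumptions chosen for the example.
 *
 *      VMXVEXITINFO ExitInfo;
 *      RT_ZERO(ExitInfo);
 *      ExitInfo.uReason                               = VMX_EXIT_VMWRITE;
 *      ExitInfo.cbInstr                               = 4;                // assumed instruction length
 *      ExitInfo.InstrInfo.VmreadVmwrite.fIsRegOperand = 0;                // memory source operand
 *      ExitInfo.InstrInfo.VmreadVmwrite.iSegReg       = X86_SREG_DS;      // segment of the source operand (assumed)
 *      ExitInfo.InstrInfo.VmreadVmwrite.iReg2         = X86_GREG_xDX;     // GPR holding the VMCS field encoding (assumed)
 *      ExitInfo.GCPtrEffAddr                          = GCPtrSrcOperand;  // effective address of the value (assumed local)
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedVmwrite(pVCpu, &ExitInfo);
 */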
16119
16120/**
16121 * Interface for HM and EM to emulate the VMPTRLD instruction.
16122 *
16123 * @returns Strict VBox status code.
16124 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16125 * @param pExitInfo Pointer to the VM-exit information.
16126 * @thread EMT(pVCpu)
16127 */
16128VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrld(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16129{
16130 Assert(pExitInfo);
16131 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16132 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16133
16134 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16135
16136 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16137 uint8_t const cbInstr = pExitInfo->cbInstr;
16138 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16139 VBOXSTRICTRC rcStrict = iemVmxVmptrld(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16140 Assert(!pVCpu->iem.s.cActiveMappings);
16141 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16142}
16143
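/*
 * Illustrative sketch only (not part of the original source): VMPTRLD, VMPTRST,
 * VMCLEAR and VMXON all take a single memory operand, so their IEMExecDecoded*
 * interfaces are fed the same way; only the exit reason differs.  The values
 * below are assumptions chosen for the example.
 *
 *      VMXVEXITINFO ExitInfo;
 *      RT_ZERO(ExitInfo);
 *      ExitInfo.uReason                    = VMX_EXIT_VMPTRLD;
 *      ExitInfo.cbInstr                    = 5;                 // assumed instruction length
 *      ExitInfo.InstrInfo.VmxXsave.iSegReg = X86_SREG_DS;       // segment of the memory operand (assumed)
 *      ExitInfo.GCPtrEffAddr               = GCPtrVmcsOperand;  // effective address of the 64-bit VMCS pointer (assumed local)
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedVmptrld(pVCpu, &ExitInfo);
 */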
16144
16145/**
16146 * Interface for HM and EM to emulate the VMPTRST instruction.
16147 *
16148 * @returns Strict VBox status code.
16149 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16150 * @param pExitInfo Pointer to the VM-exit information.
16151 * @thread EMT(pVCpu)
16152 */
16153VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmptrst(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16154{
16155 Assert(pExitInfo);
16156 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16157 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16158
16159 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16160
16161 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16162 uint8_t const cbInstr = pExitInfo->cbInstr;
16163 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16164 VBOXSTRICTRC rcStrict = iemVmxVmptrst(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16165 Assert(!pVCpu->iem.s.cActiveMappings);
16166 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16167}
16168
16169
16170/**
16171 * Interface for HM and EM to emulate the VMCLEAR instruction.
16172 *
16173 * @returns Strict VBox status code.
16174 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16175 * @param pExitInfo Pointer to the VM-exit information.
16176 * @thread EMT(pVCpu)
16177 */
16178VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmclear(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16179{
16180 Assert(pExitInfo);
16181 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16182 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16183
16184 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16185
16186 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16187 uint8_t const cbInstr = pExitInfo->cbInstr;
16188 RTGCPTR const GCPtrVmcs = pExitInfo->GCPtrEffAddr;
16189 VBOXSTRICTRC rcStrict = iemVmxVmclear(pVCpu, cbInstr, iEffSeg, GCPtrVmcs, pExitInfo);
16190 Assert(!pVCpu->iem.s.cActiveMappings);
16191 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16192}
16193
16194
16195/**
16196 * Interface for HM and EM to emulate the VMLAUNCH/VMRESUME instruction.
16197 *
16198 * @returns Strict VBox status code.
16199 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16200 * @param cbInstr The instruction length in bytes.
16201 * @param uInstrId The instruction ID (VMXINSTRID_VMLAUNCH or
16202 * VMXINSTRID_VMRESUME).
16203 * @thread EMT(pVCpu)
16204 */
16205VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmlaunchVmresume(PVMCPUCC pVCpu, uint8_t cbInstr, VMXINSTRID uInstrId)
16206{
16207 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16208 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMENTRY_MASK);
16209
16210 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16211 VBOXSTRICTRC rcStrict = iemVmxVmlaunchVmresume(pVCpu, cbInstr, uInstrId);
16212 Assert(!pVCpu->iem.s.cActiveMappings);
16213 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16214}
16215
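/*
 * Illustrative sketch only (not part of the original source): unlike the interfaces
 * above, no VM-exit information structure is needed here, just the instruction
 * length and which of the two instructions was decoded.  The length below is the
 * 3-byte encoding of VMLAUNCH/VMRESUME (0f 01 c2 / 0f 01 c3).
 *
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedVmlaunchVmresume(pVCpu, 3 /*cbInstr*/, VMXINSTRID_VMRESUME);
 */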
16216
16217/**
16218 * Interface for HM and EM to emulate the VMXON instruction.
16219 *
16220 * @returns Strict VBox status code.
16221 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16222 * @param pExitInfo Pointer to the VM-exit information.
16223 * @thread EMT(pVCpu)
16224 */
16225VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxon(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16226{
16227 Assert(pExitInfo);
16228 IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 3);
16229 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16230
16231 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16232
16233 uint8_t const iEffSeg = pExitInfo->InstrInfo.VmxXsave.iSegReg;
16234 uint8_t const cbInstr = pExitInfo->cbInstr;
16235 RTGCPTR const GCPtrVmxon = pExitInfo->GCPtrEffAddr;
16236 VBOXSTRICTRC rcStrict = iemVmxVmxon(pVCpu, cbInstr, iEffSeg, GCPtrVmxon, pExitInfo);
16237 Assert(!pVCpu->iem.s.cActiveMappings);
16238 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16239}
16240
16241
16242/**
16243 * Interface for HM and EM to emulate the VMXOFF instruction.
16244 *
16245 * @returns Strict VBox status code.
16246 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16247 * @param cbInstr The instruction length in bytes.
16248 * @thread EMT(pVCpu)
16249 */
16250VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedVmxoff(PVMCPUCC pVCpu, uint8_t cbInstr)
16251{
16252 IEMEXEC_ASSERT_INSTR_LEN_RETURN(cbInstr, 3);
16253 IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_NO_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16254
16255 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16256 VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_vmxoff);
16257 Assert(!pVCpu->iem.s.cActiveMappings);
16258 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16259}
16260
16261
16262/**
16263 * Interface for HM and EM to emulate the INVVPID instruction.
16264 *
16265 * @returns Strict VBox status code.
16266 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16267 * @param pExitInfo Pointer to the VM-exit information.
16268 * @thread EMT(pVCpu)
16269 */
16270VMM_INT_DECL(VBOXSTRICTRC) IEMExecDecodedInvvpid(PVMCPUCC pVCpu, PCVMXVEXITINFO pExitInfo)
16271{
16272    Assert(pExitInfo);
16273    IEMEXEC_ASSERT_INSTR_LEN_RETURN(pExitInfo->cbInstr, 4);
16274    IEM_CTX_ASSERT(pVCpu, IEM_CPUMCTX_EXTRN_EXEC_DECODED_MEM_MASK | CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI);
16275
16276 iemInitExec(pVCpu, false /*fBypassHandlers*/);
16277
16278 uint8_t const iEffSeg = pExitInfo->InstrInfo.Inv.iSegReg;
16279 uint8_t const cbInstr = pExitInfo->cbInstr;
16280 RTGCPTR const GCPtrInvvpidDesc = pExitInfo->GCPtrEffAddr;
16281 uint64_t const u64InvvpidType = pVCpu->iem.s.enmCpuMode == IEMMODE_64BIT
16282 ? iemGRegFetchU64(pVCpu, pExitInfo->InstrInfo.Inv.iReg2)
16283 : iemGRegFetchU32(pVCpu, pExitInfo->InstrInfo.Inv.iReg2);
16284 VBOXSTRICTRC rcStrict = iemVmxInvvpid(pVCpu, cbInstr, iEffSeg, GCPtrInvvpidDesc, u64InvvpidType, pExitInfo);
16285 Assert(!pVCpu->iem.s.cActiveMappings);
16286 return iemUninitExecAndFiddleStatusAndMaybeReenter(pVCpu, rcStrict);
16287}
16288
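/*
 * Illustrative sketch only (not part of the original source): for INVVPID the
 * descriptor lives in guest memory (GCPtrEffAddr) while the invalidation type is
 * taken from the GPR named by InstrInfo.Inv.iReg2.  The values below are
 * assumptions chosen for the example.
 *
 *      VMXVEXITINFO ExitInfo;
 *      RT_ZERO(ExitInfo);
 *      ExitInfo.uReason               = VMX_EXIT_INVVPID;
 *      ExitInfo.cbInstr               = 5;                  // assumed instruction length
 *      ExitInfo.InstrInfo.Inv.iSegReg = X86_SREG_DS;        // segment of the INVVPID descriptor (assumed)
 *      ExitInfo.InstrInfo.Inv.iReg2   = X86_GREG_xCX;       // GPR holding the invalidation type (assumed)
 *      ExitInfo.GCPtrEffAddr          = GCPtrInvvpidDesc;   // descriptor address (assumed local)
 *      VBOXSTRICTRC rcStrict = IEMExecDecodedInvvpid(pVCpu, &ExitInfo);
 */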
16289
16290/**
16291 * @callback_method_impl{FNPGMPHYSHANDLER, VMX APIC-access page accesses}
16292 *
16293 * @remarks The @a pvUser argument is currently unused.
16294 */
16295PGM_ALL_CB2_DECL(VBOXSTRICTRC) iemVmxApicAccessPageHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhysFault, void *pvPhys,
16296 void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType,
16297 PGMACCESSORIGIN enmOrigin, void *pvUser)
16298{
16299 RT_NOREF3(pvPhys, enmOrigin, pvUser);
16300
16301 RTGCPHYS const GCPhysAccessBase = GCPhysFault & ~(RTGCPHYS)PAGE_OFFSET_MASK;
16302 if (CPUMIsGuestInVmxNonRootMode(IEM_GET_CTX(pVCpu)))
16303 {
16304 Assert(CPUMIsGuestVmxProcCtls2Set(IEM_GET_CTX(pVCpu), VMX_PROC_CTLS2_VIRT_APIC_ACCESS));
16305 Assert(CPUMGetGuestVmxApicAccessPageAddr(IEM_GET_CTX(pVCpu)) == GCPhysAccessBase);
16306
16307 /** @todo NSTVMX: How are we to distinguish instruction fetch accesses here?
16308 * Currently they will go through as read accesses. */
16309 uint32_t const fAccess = enmAccessType == PGMACCESSTYPE_WRITE ? IEM_ACCESS_TYPE_WRITE : IEM_ACCESS_TYPE_READ;
16310 uint16_t const offAccess = GCPhysFault & PAGE_OFFSET_MASK;
16311 VBOXSTRICTRC rcStrict = iemVmxVirtApicAccessMem(pVCpu, offAccess, cbBuf, pvBuf, fAccess);
16312 if (RT_FAILURE(rcStrict))
16313 return rcStrict;
16314
16315        /* The access to the APIC-access page has been handled; the caller should not carry out the access itself. */
16316 return VINF_SUCCESS;
16317 }
16318
16319 Log(("iemVmxApicAccessPageHandler: Access outside VMX non-root mode, deregistering page at %#RGp\n", GCPhysAccessBase));
16320 int rc = PGMHandlerPhysicalDeregister(pVM, GCPhysAccessBase);
16321 if (RT_FAILURE(rc))
16322 return rc;
16323
16324    /* Instruct the caller of this handler to perform the read/write as a normal memory access. */
16325 return VINF_PGM_HANDLER_DO_DEFAULT;
16326}
16327
16328#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
16329
16330#ifdef IN_RING3
16331
16332/**
16333 * Handles the unlikely and probably fatal merge cases.
16334 *
16335 * @returns Merged status code.
16336 * @param rcStrict Current EM status code.
16337 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16338 * with @a rcStrict.
16339 * @param iMemMap The memory mapping index. For error reporting only.
16340 * @param pVCpu The cross context virtual CPU structure of the calling
16341 * thread, for error reporting only.
16342 */
16343DECL_NO_INLINE(static, VBOXSTRICTRC) iemR3MergeStatusSlow(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit,
16344 unsigned iMemMap, PVMCPUCC pVCpu)
16345{
16346 if (RT_FAILURE_NP(rcStrict))
16347 return rcStrict;
16348
16349 if (RT_FAILURE_NP(rcStrictCommit))
16350 return rcStrictCommit;
16351
16352 if (rcStrict == rcStrictCommit)
16353 return rcStrictCommit;
16354
16355 AssertLogRelMsgFailed(("rcStrictCommit=%Rrc rcStrict=%Rrc iMemMap=%u fAccess=%#x FirstPg=%RGp LB %u SecondPg=%RGp LB %u\n",
16356 VBOXSTRICTRC_VAL(rcStrictCommit), VBOXSTRICTRC_VAL(rcStrict), iMemMap,
16357 pVCpu->iem.s.aMemMappings[iMemMap].fAccess,
16358 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst,
16359 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond));
16360 return VERR_IOM_FF_STATUS_IPE;
16361}
16362
16363
16364/**
16365 * Helper for IEMR3ProcessForceFlag.
16366 *
16367 * @returns Merged status code.
16368 * @param rcStrict Current EM status code.
16369 * @param rcStrictCommit The IOM I/O or MMIO write commit status to merge
16370 * with @a rcStrict.
16371 * @param iMemMap The memory mapping index. For error reporting only.
16372 * @param pVCpu The cross context virtual CPU structure of the calling
16373 * thread, for error reporting only.
16374 */
16375DECLINLINE(VBOXSTRICTRC) iemR3MergeStatus(VBOXSTRICTRC rcStrict, VBOXSTRICTRC rcStrictCommit, unsigned iMemMap, PVMCPUCC pVCpu)
16376{
16377 /* Simple. */
16378 if (RT_LIKELY(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RAW_TO_R3))
16379 return rcStrictCommit;
16380
16381 if (RT_LIKELY(rcStrictCommit == VINF_SUCCESS))
16382 return rcStrict;
16383
16384 /* EM scheduling status codes. */
16385 if (RT_LIKELY( rcStrict >= VINF_EM_FIRST
16386 && rcStrict <= VINF_EM_LAST))
16387 {
16388 if (RT_LIKELY( rcStrictCommit >= VINF_EM_FIRST
16389 && rcStrictCommit <= VINF_EM_LAST))
16390 return rcStrict < rcStrictCommit ? rcStrict : rcStrictCommit;
16391 }
16392
16393 /* Unlikely */
16394 return iemR3MergeStatusSlow(rcStrict, rcStrictCommit, iMemMap, pVCpu);
16395}
16396
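/*
 * Worked example (illustrative, not from the original source): with
 * rcStrict == VINF_EM_RAW_TO_R3 and rcStrictCommit == VINF_SUCCESS the first test
 * above returns the commit status (VINF_SUCCESS); with rcStrict == VINF_EM_HALT
 * and rcStrictCommit == VINF_SUCCESS the second test keeps VINF_EM_HALT; and when
 * both are EM scheduling codes, the numerically smaller (higher priority) one wins.
 */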
16397
16398/**
16399 * Called by force-flag handling code when VMCPU_FF_IEM is set.
16400 *
16401 * @returns The merge of @a rcStrict and the status returned by the commit operation.
16402 * @param pVM The cross context VM structure.
16403 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
16404 * @param rcStrict The status code returned by ring-0 or raw-mode.
16405 */
16406VMMR3_INT_DECL(VBOXSTRICTRC) IEMR3ProcessForceFlag(PVM pVM, PVMCPUCC pVCpu, VBOXSTRICTRC rcStrict)
16407{
16408 /*
16409 * Reset the pending commit.
16410 */
16411 AssertMsg( (pVCpu->iem.s.aMemMappings[0].fAccess | pVCpu->iem.s.aMemMappings[1].fAccess | pVCpu->iem.s.aMemMappings[2].fAccess)
16412 & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND),
16413 ("%#x %#x %#x\n",
16414 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16415 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_IEM);
16416
16417 /*
16418 * Commit the pending bounce buffers (usually just one).
16419 */
16420 unsigned cBufs = 0;
16421 unsigned iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
16422 while (iMemMap-- > 0)
16423 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & (IEM_ACCESS_PENDING_R3_WRITE_1ST | IEM_ACCESS_PENDING_R3_WRITE_2ND))
16424 {
16425 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_TYPE_WRITE);
16426 Assert(pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_BOUNCE_BUFFERED);
16427 Assert(!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned);
16428
16429 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst;
16430 uint16_t const cbSecond = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond;
16431 uint8_t const *pbBuf = &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0];
16432
16433 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_1ST)
16434 {
16435 VBOXSTRICTRC rcStrictCommit1 = PGMPhysWrite(pVM,
16436 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst,
16437 pbBuf,
16438 cbFirst,
16439 PGMACCESSORIGIN_IEM);
16440 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit1, iMemMap, pVCpu);
16441 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysFirst=%RGp LB %#x %Rrc => %Rrc\n",
16442 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, cbFirst,
16443 VBOXSTRICTRC_VAL(rcStrictCommit1), VBOXSTRICTRC_VAL(rcStrict)));
16444 }
16445
16446 if (pVCpu->iem.s.aMemMappings[iMemMap].fAccess & IEM_ACCESS_PENDING_R3_WRITE_2ND)
16447 {
16448 VBOXSTRICTRC rcStrictCommit2 = PGMPhysWrite(pVM,
16449 pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond,
16450 pbBuf + cbFirst,
16451 cbSecond,
16452 PGMACCESSORIGIN_IEM);
16453 rcStrict = iemR3MergeStatus(rcStrict, rcStrictCommit2, iMemMap, pVCpu);
16454 Log(("IEMR3ProcessForceFlag: iMemMap=%u GCPhysSecond=%RGp LB %#x %Rrc => %Rrc\n",
16455 iMemMap, pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond, cbSecond,
16456 VBOXSTRICTRC_VAL(rcStrictCommit2), VBOXSTRICTRC_VAL(rcStrict)));
16457 }
16458 cBufs++;
16459 pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
16460 }
16461
16462 AssertMsg(cBufs > 0 && cBufs == pVCpu->iem.s.cActiveMappings,
16463 ("cBufs=%u cActiveMappings=%u - %#x %#x %#x\n", cBufs, pVCpu->iem.s.cActiveMappings,
16464 pVCpu->iem.s.aMemMappings[0].fAccess, pVCpu->iem.s.aMemMappings[1].fAccess, pVCpu->iem.s.aMemMappings[2].fAccess));
16465 pVCpu->iem.s.cActiveMappings = 0;
16466 return rcStrict;
16467}
16468
16469#endif /* IN_RING3 */
16470